T
Thema203
Hi;
I am working on a linker program for Linux64. The development environment however
is running Windows 7 Professional 64 Bit. In other words, I am using a Windows Operating
System to develop the linker. Everything is being done in Windows, including testing and
when it's finished, I will do an overall test in Linux64. After getting errors with type-casting
I decided to create a function called 'ConvertStringToBinary' which takes a string of binary
numbers and converts it into a number. However, I am having some problems with that
function, where it changes 'tmpn4' to 0 after exceeding the maximum 32-bit number. I
guess that it is still using 32-bit registers when it multiplies 'tmpn4' by 16. The code is
below;
TRESULT ConvertStringToBinary(_IN char *instr, _IN int inn, _INOUT long long *outn){
    /*
     * Convert a byte string into a 64-bit number, reading the bytes as
     * little-endian: byte i contributes its low nibble at weight 16^(2i)
     * and its high nibble at weight 16^(2i+1), i.e. result = sum of
     * instr[i] * 256^i.
     *
     * instr - input bytes (length given by inn, need not be NUL-terminated).
     * inn   - number of bytes to consume.
     * outn  - receives the converted value.
     *
     * Returns TSUCCESS, or TPTRISNULL / TIDXISZERO on bad arguments.
     */
    //Variable initializations.
    char *tmpstr = 0;
    long long tmpn2 = 0; //low-nibble contribution of the current byte
    long long tmpn3 = 0; //high-nibble contribution of the current byte
    long long tmpn4 = 0; //current place value (power of 16)
    long long tmpn5 = 0; //accumulated result
    int tmpn = 0;
    int i = 0;
    //Argument checks.
    if(instr == NULL){
        printf("Error util0000050 - NULL pointer received in ConvertStringToBinary.\n");
        return TPTRISNULL;
    }
    if(outn == NULL){
        printf("Error util0000050 - NULL pointer received in ConvertStringToBinary.\n");
        return TPTRISNULL;
    }
    if(inn == 0){
        printf("Error util0000052 - Zero length received in ConvertStringToBinary.\n");
        return TIDXISZERO;
    }
    //Initializations.
    tmpstr = instr;
    tmpn = inn;
    tmpn4 = 16;
    //Main logic.
    //Convert the string to binary.
    //BUG FIX: the original code masked the POINTER itself ("tmpstr & 0x0F")
    //instead of the byte it points at, and never advanced through the
    //string. Index the current byte, and go through unsigned char so a
    //byte >= 0x80 is not sign-extended before the mask is applied.
    for(i = 0; i < tmpn; i++){
        tmpn2 = ((unsigned char)tmpstr[i] & 0x0F);
        if(i != 0){
            tmpn2 = (tmpn2 * tmpn4);
            tmpn4 = (tmpn4 * 16);
        }
        tmpn3 = ((unsigned char)tmpstr[i] & 0xF0);
        tmpn3 = (tmpn3 >> 4);
        tmpn3 = (tmpn3 * tmpn4);
        tmpn4 = (tmpn4 * 16);
        //NOTE(review): for inn > 8 the place value tmpn4 overflows signed
        //long long, which is undefined behavior - keep inn <= 8.
        tmpn5 = (tmpn5 + tmpn2 + tmpn3);
    }
    //Returns
    *outn = tmpn5;
    return TSUCCESS;
}
Can anyone solve this? If they do, I will be very happy.
Thema
Continue reading...
I am working on a linker program for Linux64. The development environment however
is running Windows 7 Professional 64 Bit. In other words, I am using a Windows Operating
System to develop the linker. Everything is being done in Windows, including testing and
when it's finished, I will do an overall test in Linux64. After getting errors with type-casting
I decided to create a function called 'ConvertStringToBinary' which takes a string of binary
numbers and converts it into a number. However, I am having some problems with that
function, where it changes 'tmpn4' to 0 after exceeding the maximum 32-bit number. I
guess that it is still using 32-bit registers when it multiplies 'tmpn4' by 16. The code is
below;
TRESULT ConvertStringToBinary(_IN char *instr, _IN int inn, _INOUT long long *outn){
    /*
     * Convert a byte string into a 64-bit number, reading the bytes as
     * little-endian: byte i contributes its low nibble at weight 16^(2i)
     * and its high nibble at weight 16^(2i+1), i.e. result = sum of
     * instr[i] * 256^i.
     *
     * instr - input bytes (length given by inn, need not be NUL-terminated).
     * inn   - number of bytes to consume.
     * outn  - receives the converted value.
     *
     * Returns TSUCCESS, or TPTRISNULL / TIDXISZERO on bad arguments.
     */
    //Variable initializations.
    char *tmpstr = 0;
    long long tmpn2 = 0; //low-nibble contribution of the current byte
    long long tmpn3 = 0; //high-nibble contribution of the current byte
    long long tmpn4 = 0; //current place value (power of 16)
    long long tmpn5 = 0; //accumulated result
    int tmpn = 0;
    int i = 0;
    //Argument checks.
    if(instr == NULL){
        printf("Error util0000050 - NULL pointer received in ConvertStringToBinary.\n");
        return TPTRISNULL;
    }
    if(outn == NULL){
        printf("Error util0000050 - NULL pointer received in ConvertStringToBinary.\n");
        return TPTRISNULL;
    }
    if(inn == 0){
        printf("Error util0000052 - Zero length received in ConvertStringToBinary.\n");
        return TIDXISZERO;
    }
    //Initializations.
    tmpstr = instr;
    tmpn = inn;
    tmpn4 = 16;
    //Main logic.
    //Convert the string to binary.
    //BUG FIX: the original code masked the POINTER itself ("tmpstr & 0x0F")
    //instead of the byte it points at, and never advanced through the
    //string. Index the current byte, and go through unsigned char so a
    //byte >= 0x80 is not sign-extended before the mask is applied.
    for(i = 0; i < tmpn; i++){
        tmpn2 = ((unsigned char)tmpstr[i] & 0x0F);
        if(i != 0){
            tmpn2 = (tmpn2 * tmpn4);
            tmpn4 = (tmpn4 * 16);
        }
        tmpn3 = ((unsigned char)tmpstr[i] & 0xF0);
        tmpn3 = (tmpn3 >> 4);
        tmpn3 = (tmpn3 * tmpn4);
        tmpn4 = (tmpn4 * 16);
        //NOTE(review): for inn > 8 the place value tmpn4 overflows signed
        //long long, which is undefined behavior - keep inn <= 8.
        tmpn5 = (tmpn5 + tmpn2 + tmpn3);
    }
    //Returns
    *outn = tmpn5;
    return TSUCCESS;
}
Can anyone solve this? If they do, I will be very happy.
Thema
Continue reading...