I recently ran into this problem in a project: a Chinese character such as "冲" (chong) needs to be converted to its hexadecimal Unicode code point, "51B2". The program is built with the Unicode character set. Below is the solution I put together after going through the available material:
//---------------------------------------------------------------------
// Function:  W2C
// Purpose:   Splits a 16-bit wchar_t into two 8-bit chars (char[2])
// Params:    w_cn is the 16-bit character to convert; c_cn[] receives
//            the two converted 8-bit bytes
// Note:      The high byte of the wchar_t is stored in c_cn[0], the
//            low byte in c_cn[1]
// Author:    Chuchong
//---------------------------------------------------------------------
void StyleConvert::W2C(wchar_t w_cn, char c_cn[])
{
    // Split the 16-bit wchar_t into two 8-bit chars: high byte first
    c_cn[0] = (char)(w_cn >> 8);
    c_cn[1] = (char)w_cn;
}
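
To sanity-check the byte split, here is a minimal, self-contained sketch in plain C++ (no MFC) that applies the same shift-and-cast that W2C performs to the character "冲" (code point U+51B2); the variable names are illustrative only:

#include <cassert>

int main()
{
    wchar_t w = 0x51B2;           // "冲" as a UTF-16 code unit
    char bytes[2] = {0};

    bytes[0] = (char)(w >> 8);    // high byte, matches c_cn[0]
    bytes[1] = (char)w;           // low byte, matches c_cn[1]

    assert((unsigned char)bytes[0] == 0x51);
    assert((unsigned char)bytes[1] == 0xB2);
    return 0;
}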
//---------------------------------------------------------------------
// Function:  ConvertWCharToHex
// Purpose:   Converts a 16-bit (wide) string into a hexadecimal string
// Params:    data is the string to convert; nDataLength is its length
// Returns:   The converted hexadecimal string
// Author:    Chuchong
//---------------------------------------------------------------------
CString StyleConvert::ConvertWCharToHex(CString data, long nDataLength)
{
    CString sResult(_T(""));
    for (long nLoop = 0; nLoop < nDataLength; nLoop++)
    {
        wchar_t ch = data.GetAt(nLoop);
        // Convert the wchar_t into char[2]
        char c_cn[2] = {0};
        W2C(ch, c_cn);
        static const char *hex = "0123456789ABCDEF";
        for (int i = 0; i < 2; i++)
        {
            // Look up the high and low nibble of each byte
            char chHexA = hex[((unsigned char)c_cn[i] >> 4) & 0x0F];
            char chHexB = hex[(unsigned char)c_cn[i] & 0x0F];
            sResult += (TCHAR)chHexA;
            sResult += (TCHAR)chHexB;
        }
    }
    return sResult;
}
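
For completeness, a hypothetical call site, assuming StyleConvert (the class these methods belong to) is default-constructible in a Unicode MFC project; the local variable names here are mine:

StyleConvert conv;
CString text = _T("冲");
CString hex = conv.ConvertWCharToHex(text, text.GetLength());
// hex should now hold "51B2"; each character of the input contributes
// four hex digits to the output.
TRACE(_T("%s\n"), (LPCTSTR)hex);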