I recently ran into this problem while working on a project that uses the Unicode character set: for example, after converting the Chinese character "冲" (chong), you get its code point "51b2". The solution is as follows:
//---------------------------------------------------------------------
// Function: W2C
// Function: Convert 16-bit wchar_t to 8-bit char [2]
// Parameter: w_cn is the 16-bit character to be converted, and c_cn [] is the 8-bit character after conversion.
// Note: the high byte of wchar_t should be stored in the low byte of the char array
// Author: chuchong
//---------------------------------------------------------------------
Void styleconvert: W2C (wchar_t w_cn, char c_cn [])
{
// Following code convert wchar to Char
C_cn [0] = w_cn> 8;
C_cn [1] = (char) w_cn;
}
//-------------------------------------------------------------------
// Function: convertwchartohex
// Function: convert a 16-bit string to a hexadecimal string
// Parameter: string to be converted, String Length
// Return value: converted string
// Author: chuchong
//-------------------------------------------------------------------
Cstring styleconvert: convertwchartohex (cstring data, long ndatalength)
{
Cstring sresult ("");
For (long nloop = 0; nloop <ndatalength; nloop ++)
{
Wchar_t CH = data. getat (nloop );
// Convert wchar_t to Char [2]
Char c_cn [2] = {'0 '};
W2C (CH, c_cn );
Static const char * hex = "0123456789 abcdef ";
For (INT I = 0; I <2; I ++)
{
Unsigned char chhexa = hex [(unsigned char) (c_cn [I])> 4) & 0x0f];
Unsigned char chhexb = hex [(unsigned char) (c_cn [I]) & 0x0f];
Sresult + = (char) chhexa;
Sresult + = (char) chhexb;
}
}
Return sresult;
}