The following code is just for my own review; its practical value is low ~~ Haha :>
//// Convert decimal to binary. ////

#include <cstdlib>
#include <iostream>
#include <limits>
#include <vector>

// Represent the binary digits with an unsigned long whose decimal digits are all 0 or 1.
// Because the number of digits that can be stored this way is limited, the input
// parameter is restricted to unsigned short (unsigned long should be 64 bits wide
// to hold all 16 possible digits).
unsigned long tobinary(unsigned short decimal)
{
    unsigned long binary = 0;
    unsigned long carry = 1;
    while (0 < decimal) {
        binary += decimal % 2 ? carry : 0;
        carry *= 10;
        decimal /= 2;
    }
    return binary;
}

// Save the binary digits as characters in a vector (least significant digit first).
void tobinary_vector(unsigned long decimal, std::vector<char>& binary)
{
    while (0 < decimal) {
        binary.push_back(decimal % 2 + '0');
        decimal /= 2;
    }
    if (binary.empty()) {  // keep a single '0' when the input is 0
        binary.push_back('0');
    }
}

// Test.
int main(void)
{
    std::cout << "input decimal number: ";
    unsigned short decimal;
    while (!(std::cin >> decimal)) {
        std::cin.clear();   // clear the error flags first,
        std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');  // then discard the bad input
        std::cout << "input wrong, please input again: ";
    }

    std::cout << "binary: " << tobinary(decimal) << std::endl;

    std::vector<char> binary;
    tobinary_vector(decimal, binary);
    std::cout << "binary: ";
    // Print in reverse so the most significant digit comes first.
    for (std::vector<char>::reverse_iterator it = binary.rbegin(); it != binary.rend(); ++it) {
        std::cout << *it;
    }
    std::cout << std::endl;

    return EXIT_SUCCESS;
}
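With the program above, entering 10 should print "binary: 1010" twice, once from tobinary and once from the vector version. For comparison, here is a minimal sketch of the same conversion using std::bitset from the standard library; this is my own addition rather than part of the reviewed code, and the 16-bit width and the example value 10 are assumptions chosen to match the unsigned short input above.

// Minimal sketch (my addition): std::bitset renders the binary form directly.
#include <bitset>
#include <iostream>

int main()
{
    unsigned short decimal = 10;                    // assumed example value
    std::bitset<16> bits(decimal);                  // 16 bits covers any unsigned short
    std::cout << "binary: " << bits << std::endl;   // prints 0000000000001010
    return 0;
}

Note that the bitset version always prints leading zeros, whereas the hand-written versions above print only the significant digits.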