package unitfourteen;

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;
import org.apache.mahout.math.RandomAccessSparseVector;
import org.apache.mahout.math.SequentialAccessSparseVector;
import org.apache.mahout.math.Vector;
import org.apache.mahout.vectorizer.encoders.FeatureVectorEncoder;
import org.apache.mahout.vectorizer.encoders.StaticWordValueEncoder;

/**
 * Shows how to encode free text as a Mahout vector: the text is tokenized
 * with Lucene's {@link StandardAnalyzer}, and each token is hashed into a
 * sparse vector by a {@link StaticWordValueEncoder} with weight 1, so the
 * resulting vector is the sum of the per-word encodings.
 *
 * @author yangxin
 */
public class TokenizingAndVectorizingText {

    public static void main(String[] args) throws IOException {
        // Encoder named "text"; the name seeds the hashing so different
        // features ("text" vs. other fields) land in different locations.
        FeatureVectorEncoder encoder = new StaticWordValueEncoder("text");
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_31);

        StringReader in = new StringReader("text to magically vectorize");
        // "body" is the (arbitrary) Lucene field name for this stream.
        TokenStream ts = analyzer.tokenStream("body", in);
        // TermAttribute exposes each token's characters as the stream advances.
        // NOTE(review): TermAttribute is the Lucene 3.x API; on Lucene 4+ use
        // CharTermAttribute instead.
        TermAttribute termAtt = ts.addAttribute(TermAttribute.class);

        // 100 is the vector cardinality: the hashed feature space size.
        Vector v1 = new RandomAccessSparseVector(100);
        while (ts.incrementToken()) {
            char[] termBuffer = termAtt.termBuffer();
            int termLen = termAtt.termLength();
            String word = new String(termBuffer, 0, termLen);
            // Each occurrence adds weight 1 at the word's hashed index.
            encoder.addToVector(word, 1, v1);
        }

        // Convert to sequential-access form (efficient for iteration) and print.
        System.out.printf("%s%n", new SequentialAccessSparseVector(v1));
    }
}
Lexical analysis (tokenization) and vectorization of text