基于SSM的在线阅读系统(MySQL数据源)。1.包含源程序和数据库脚本,代码和数据库脚本都有详细注释。
2.课题设计仅供参考学习使用,可以在此基础上进行扩展完善
开发环境:
Eclipse ,MYSQL,JDK1.8,Tomcat 7
代码已经上传github,下载地址https://github.com/21503882/reading
涉及技术点:
MVC模式、SpringMvc、Mybatis、Spring、EasyUI、HTML、JavaScript、CSS、JQUERY、log4j、Ajax等
系统采用Mybatis框架实现ORM对象关系映射,前台JSP实现,后台springMvc映射,使用Spring框架进行整合。适合学习J2EE的一段时间的熟手,代码思路清晰,注解详细,数据库用的是mysql5.1,服务器用的tomcat7,JDK版本1.8. 编程软件Eclispe J2EE版本。是典型MVC架构,并且前后台分离
主要功能:
package fz.bayes;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.ToolRunner;
import org.apache.mahout.classifier.naivebayes.AbstractNaiveBayesClassifier;
import org.apache.mahout.classifier.naivebayes.BayesUtils;
import org.apache.mahout.classifier.naivebayes.NaiveBayesModel;
import org.apache.mahout.classifier.naivebayes.StandardNaiveBayesClassifier;
import org.apache.mahout.classifier.naivebayes.training.WeightsMapper;
import org.apache.mahout.common.AbstractJob;
import org.apache.mahout.common.HadoopUtil;
import org.apache.mahout.math.Vector;
/**
 * Bayesian classification job.
 * Input: one feature vector per line, fields separated by the -SV
 * delimiter (comma by default), e.g.
 * [
 *  2.1,3.2,1.2
 *  2.1,3.2,1.3
 * ]
 * Each input vector is classified with a trained naive Bayes model,
 * either in-process or as a MapReduce job depending on the -mr option.
 * (Original comment was mojibake; reconstructed from the code.)
 * @author fansy
 *
 */
public class BayesClassifiedJob extends AbstractJob {

    /**
     * Entry point. Runs the job through Hadoop's ToolRunner so generic
     * options (-D, -fs, ...) are folded into the Configuration.
     *
     * @param args command-line options, see {@link #run(String[])}
     * @throws Exception if option parsing or the job itself fails
     */
    public static void main(String[] args) throws Exception {
        ToolRunner.run(new Configuration(), new BayesClassifiedJob(), args);
    }

    /**
     * Parses the command-line options and dispatches to either the
     * MapReduce path or the local single-process path.
     *
     * @return 0 on success, -1 on bad arguments or failure
     */
    @Override
    public int run(String[] args) throws Exception {
        addInputOption();
        addOutputOption();
        addOption("model", "m", "The file where bayesian model store ");
        addOption("labelIndex", "labelIndex", "The file where the index store ");
        addOption("labelNumber", "ln", "The labels number ");
        addOption("mapreduce", "mr", "Whether use mapreduce, true use ,else not use ");
        addOption("SV", "SV", "The input vector splitter ,default is comma", ",");
        if (parseArguments(args) == null) {
            return -1;
        }
        Configuration conf = getConf();
        Path input = getInputPath();
        Path output = getOutputPath();
        String labelNumber = getOption("labelNumber");
        String modelPath = getOption("model");
        String useMR = getOption("mapreduce");
        String SV = getOption("SV");
        String labelIndex = getOption("labelIndex");
        // BUG FIX: the original used "true".endsWith(useMR), which is true for
        // any suffix of "true" ("rue", "ue", "e", "") and throws NPE when the
        // option is absent. A null-safe equality test is what was intended.
        if ("true".equalsIgnoreCase(useMR)) {
            return useMRToClassify(conf, labelNumber, modelPath, input, output, SV, labelIndex);
        }
        return classify(conf, input, output, labelNumber, modelPath, SV, labelIndex);
    }

    /**
     * Local (non-MapReduce) classification: reads the input file line by
     * line, classifies each vector and writes "&lt;line&gt;&lt;sv&gt;&lt;label&gt;" rows to
     * &lt;output&gt;/result via {@link FSDataOutputStream#writeUTF(String)}.
     *
     * @param conf        Hadoop configuration
     * @param input       input file of delimited feature vectors
     * @param output      output directory; the result file is output/result
     * @param labelNumber number of labels (unused here, kept for signature parity)
     * @param modelPath   path of the serialized naive Bayes model
     * @param sv          field delimiter of the input vectors
     * @param labelIndex  path of the label-index file
     * @return 0 on success, -1 on any error (stack trace is printed)
     */
    private int classify(Configuration conf, Path input, Path output, String labelNumber,
            String modelPath, String sv, String labelIndex) {
        try {
            // Load the trained model and the label dictionary.
            NaiveBayesModel model = NaiveBayesModel.materialize(new Path(modelPath), conf);
            AbstractNaiveBayesClassifier classifier = new StandardNaiveBayesClassifier(model);
            Map<Integer, String> labelMap = BayesUtils.readLabelIndex(conf, new Path(labelIndex));
            Path outputPath = new Path(output, "result");
            FileSystem fs = FileSystem.get(input.toUri(), conf);
            // Overwrite a previous result, if any.
            if (fs.exists(outputPath)) {
                fs.delete(outputPath, true);
            }
            // FIX: try-with-resources — the original leaked all four streams
            // whenever classification threw mid-loop.
            try (FSDataInputStream in = fs.open(input);
                 InputStreamReader istr = new InputStreamReader(in);
                 BufferedReader br = new BufferedReader(istr);
                 FSDataOutputStream out = fs.create(outputPath)) {
                StringBuilder buff = new StringBuilder();
                String lines;
                // An empty line terminates the loop, matching the original behavior.
                while ((lines = br.readLine()) != null && !"".equals(lines)) {
                    String[] line = lines.split(sv);
                    if (line.length < 1) {
                        break;
                    }
                    Vector original = BayesUtil.transformToVector(line);
                    Vector result = classifier.classifyFull(original);
                    String label = BayesUtil.classifyVector(result, labelMap);
                    buff.append(lines).append(sv).append(label).append('\n');
                }
                // BUG FIX: guard against empty input — substring(0, -1) threw
                // StringIndexOutOfBoundsException. Trailing '\n' is stripped.
                if (buff.length() > 0) {
                    out.writeUTF(buff.substring(0, buff.length() - 1));
                }
                out.flush();
            }
        } catch (Exception e) {
            e.printStackTrace();
            return -1;
        }
        return 0;
    }

    /**
     * MapReduce classification: a map-only job whose mapper classifies each
     * input line against the model distributed via the file cache.
     *
     * @param conf        Hadoop configuration (receives numLabels, SV, labelIndex)
     * @param labelNumber number of labels, passed to the mapper
     * @param modelPath   path of the serialized naive Bayes model (cached)
     * @param input       input path of delimited feature vectors
     * @param output      job output directory (deleted first if present)
     * @param SV          field delimiter of the input vectors
     * @param labelIndex  path of the label-index file, passed to the mapper
     * @return 0 if the job completes successfully, -1 otherwise
     * @throws IOException            on HDFS errors
     * @throws ClassNotFoundException if job classes cannot be resolved
     * @throws InterruptedException   if the job is interrupted
     */
    private int useMRToClassify(Configuration conf, String labelNumber, String modelPath, Path input, Path output,
            String SV, String labelIndex) throws IOException, ClassNotFoundException, InterruptedException {
        conf.set(WeightsMapper.class.getName() + ".numLabels", labelNumber);
        conf.set("SV", SV);
        conf.set("labelIndex", labelIndex);
        // Ship the model to the mappers via the distributed cache.
        HadoopUtil.cacheFiles(new Path(modelPath), conf);
        HadoopUtil.delete(conf, output);
        Job job = Job.getInstance(conf, "");
        job.setJobName("Use bayesian model to classify the input:" + input.getName());
        job.setJarByClass(BayesClassifiedJob.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setMapperClass(BayesClassifyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        // Map-only job: no reducers needed for per-line classification.
        job.setNumReduceTasks(0);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, input);
        FileOutputFormat.setOutputPath(job, output);
        if (job.waitForCompletion(true)) {
            return 0;
        }
        return -1;
    }
}
package bayes;
import org.apache.poi.xssf.usermodel.XSSFCell;
import org.apache.poi.xssf.usermodel.XSSFRow;
import org.apache.poi.xssf.usermodel.XSSFSheet;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import java.io.*;
import java.util.Random;
/**
* Created by linux on 17-3-19.
*/
public class ReadCSV {
    // Output root directory; main() switches it from the train set to the test set.
    private static String gen = "/home/linux/桌面/毕业设计/毕业设计train";
    // Category (sub-directory) names, one per extracted column.
    private static String[] str = {"Order", "Commodity", "Price", "Province", "City", "Zipcode", "Name", "Gender"};
    // Rolling output-file counter: files are named start .. total-1, then wrap.
    private static int start = 0;
    private static int num = 0;
    private static int total = 0;
    // Index (1..3) of the workbook currently being processed.
    private static int data = 1;

    /**
     * Cell-to-category mapping for the 9-column workbook (data == 2).
     * NOTE(review): cells 4 and 5 both map to "City" — this reproduces the
     * original code exactly; confirm whether cell 5 was meant to be "Zipcode".
     */
    private static final int[] NINE_COL_MAP = {0, 1, 2, 3, 4, 4, 5, 6, 7};
    /** Cell-to-category mapping for the 8-column workbooks (data == 1 or 3). */
    private static final int[] EIGHT_COL_MAP = {0, 1, 2, 3, 4, 5, 6, 7};

    /**
     * Converts three training workbooks and one test workbook into
     * per-category text files for naive Bayes training.
     */
    public static void main(String[] args) throws Exception {
        // Training data: workbooks 1..3.
        for (; data <= 3; data++) {
            run();
        }
        data = 1;
        // Test data: workbook 1 again, into the test directory.
        gen = "/home/linux/桌面/毕业设计/毕业设计test";
        run();
    }

    /**
     * Processes workbook number {@code data}: selects its file-counter
     * window, (re)creates the output tree and extracts every row.
     */
    public static void run() {
        // Each workbook writes into its own window of file numbers.
        if (data == 1) {
            start = 0;
            num = 0;
            total = 80;
        } else if (data == 2) {
            start = 80;
            num = 80;
            total = 170;
        } else {
            start = 170;
            num = 170;
            total = 250;
        }
        String file = "/home/linux/桌面/建模数据/建模数据0" + data + ".xlsx";
        ReadCSV read = new ReadCSV();
        // Wipe previous output only before the first workbook.
        if (data == 1) {
            read.truncate(new File(gen));
        }
        read.createdir(gen);
        read.readxslx(file);
    }

    /**
     * Reads every sheet and every data row of an .xlsx workbook and writes
     * each mapped cell into its category file.
     *
     * @param file path of the workbook to read
     */
    public void readxslx(String file) {
        // FIX: try-with-resources — the original never closed the stream.
        try (InputStream in = new FileInputStream(new File(file))) {
            XSSFWorkbook xssfWorkbook = new XSSFWorkbook(in);
            // Every sheet of the workbook.
            for (int numsheet = 0; numsheet < xssfWorkbook.getNumberOfSheets(); numsheet++) {
                XSSFSheet xssfSheet = xssfWorkbook.getSheetAt(numsheet);
                if (xssfSheet == null) {
                    continue;
                }
                // Every data row (row 0 is the header).
                for (int rowNum = 1; rowNum <= xssfSheet.getLastRowNum(); rowNum++) {
                    XSSFRow xssfRow = xssfSheet.getRow(rowNum);
                    if (xssfRow == null) {
                        continue;
                    }
                    // The three hand-unrolled copies of this loop collapsed
                    // into one table-driven helper.
                    emitRow(xssfRow, data == 2 ? NINE_COL_MAP : EIGHT_COL_MAP);
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Writes each mapped cell of one row into its category file. */
    private void emitRow(XSSFRow row, int[] colToLabel) {
        for (int col = 0; col < colToLabel.length; col++) {
            output(str[colToLabel[col]], getValue(row.getCell(col)));
        }
    }

    /**
     * Extracts a cell's string value with a trailing newline.
     * Data-format conversion, cf. http://blog.csdn.net/zhengyikuangge/article/details/51524691
     *
     * @return the cell text plus "\n", or null for a null/"0" cell (output() skips nulls)
     */
    private String getValue(XSSFCell xssfRow) {
        if (xssfRow != null && !xssfRow.getStringCellValue().equals("0")) {
            return xssfRow.getStringCellValue() + "\n";
        }
        return null;
    }

    /**
     * Appends one value to the next numbered file of a category, after
     * category-specific normalization (Province/City suffixes, zero-padding
     * of Order and Zipcode).
     *
     * @param filename category (sub-directory) name
     * @param context  value to write, newline-terminated; null/empty is skipped
     */
    public void output(String filename, String context) {
        if (num == total) num = start;  // wrap the rolling counter
        String file = gen + "/" + filename + "/" + num++;
        FileOutputStream fis = null;
        try {
            // Append mode; FileOutputStream creates the file if absent, so the
            // original's dead createNewFile() call was dropped.
            fis = new FileOutputStream(new File(file), true);
            // BUG FIX: the original compared context != "" by reference; use isEmpty().
            if (context != null && !context.isEmpty()) {
                if (filename.equals("Province")) {
                    if (context.contains("市")) {
                        context = context.replace("市", "省");
                    }
                    if (!context.contains("省")) {
                        context = context.replace("\n", "") + "省" + "\n";
                    }
                }
                if (filename.equals("City")) {
                    if (!context.contains("市")) {
                        context = context.replace("\n", "") + "市" + "\n";
                    }
                }
                if (filename.equals("Order")) {
                    context = padTo(context, 11);
                }
                if (filename.equals("Zipcode")) {
                    context = padTo(context, 7);
                }
                fis.write(context.getBytes());
                fis.flush();
            }
        } catch (IOException e) {
            // FileNotFoundException is an IOException; one handler suffices.
            e.printStackTrace();
        } finally {
            // BUG FIX: fis is null when the open failed — the original NPE'd here.
            if (fis != null) {
                try {
                    fis.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }

    /**
     * Right-pads a newline-terminated value with '0' until its total length
     * (newline included) reaches {@code width}, matching the original inline
     * padding of Order (11) and Zipcode (7).
     */
    private static String padTo(String context, int width) {
        int missing = width - context.length();
        if (missing <= 0) {
            return context;
        }
        StringBuilder zeros = new StringBuilder(missing);
        for (int i = 0; i < missing; i++) {
            zeros.append('0');
        }
        return context.replace("\n", "") + zeros + "\n";
    }

    /**
     * Recursively deletes a file or directory tree.
     *
     * @param files root to delete; a missing path is ignored
     */
    public void truncate(File files) {
        // BUG FIX: list() returns null for a nonexistent path — the original
        // NPE'd when the output directory had not been created yet.
        if (!files.exists()) {
            return;
        }
        if (files.isFile() || files.list().length == 0) {
            files.delete();
        } else {
            File[] dir = files.listFiles();
            for (int i = 0; i < dir.length; i++) {
                truncate(dir[i]);
                dir[i].delete();
            }
        }
    }

    /**
     * Creates one sub-directory per category under {@code gen}.
     *
     * @param dir unused; kept for signature compatibility (gen is used instead)
     */
    public void createdir(String dir) {
        for (int i = 0; i < str.length; i++) {
            File file = new File(gen + "/" + str[i]);
            // BUG FIX: the original returned on the FIRST existing directory,
            // leaving all remaining category directories uncreated.
            if (!file.exists()) {
                file.mkdirs();
            }
        }
    }
}
代码已经上传github,下载地址https://github.com/21503882/reading