/**
 * Migrates every row of the legacy Oracle table into the new table using a
 * pool of worker threads (default 20).
 *
 * Flow: count the rows, load them all into memory as column-name -> value
 * maps, split them into roughly equal chunks via {@link #splitList}, then save
 * each chunk on its own worker thread, waiting on a {@link CountDownLatch}
 * until every worker finishes.
 *
 * NOTE(review): this loads the whole source table into memory at once —
 * acceptable for the data volumes this job was written for, but it will not
 * scale to very large tables; verify row counts before reuse.
 *
 * @throws SQLException         if the count/select queries fail
 * @throws InterruptedException if the wait for the workers is interrupted
 */
public void sync_table_test_thread() throws SQLException, InterruptedException {
    long start = System.currentTimeMillis();
    // try-with-resources: the pooled connection and both statement/result-set
    // pairs are released on every path, including exceptions (the original
    // leaked all of them on failure and never closed the connection).
    try (Connection connection = DruidJdbcUtils.getConnection("db0")) {
        // Total number of rows to migrate.
        int count = 0;
        try (PreparedStatement countStatement =
                     connection.prepareStatement("select count (*) from Table \n");
             ResultSet countSet = countStatement.executeQuery()) {
            if (countSet.next()) {
                count = countSet.getInt(1);
            }
        }

        if (count > 0) {
            int threadCount = 20; // default number of worker threads
            // Rows per chunk = floor(count / threads), but at least 1 so that
            // tables smaller than the thread count still produce chunks.
            int listSize = Math.max(1, count / threadCount);

            // Load every row as a column-name -> value map.
            List<Map<String, Object>> rows = new ArrayList<>(count);
            try (PreparedStatement preparedStatement =
                         connection.prepareStatement("select * from TABLE \n");
                 ResultSet resultSet = preparedStatement.executeQuery()) {
                ResultSetMetaData md = resultSet.getMetaData();
                int colnum = md.getColumnCount(); // columns per row
                while (resultSet.next()) {
                    Map<String, Object> row = new HashMap<>(colnum);
                    for (int i = 1; i <= colnum; i++) {
                        if ("TABLE_CDATE".equals(md.getColumnName(i))) {
                            // Date column is stored as "yyyy-MM-dd HH:mm:ss"
                            // text so the worker can re-parse it below.
                            row.put(md.getColumnName(i),
                                    resultSet.getDate(i) + " " + resultSet.getTime(i));
                        } else {
                            row.put(md.getColumnName(i), resultSet.getObject(i));
                        }
                    }
                    rows.add(row);
                }
            }

            // One chunk per worker; chunk count may exceed threadCount by one
            // because of the floor division above.
            Map<String, List> chunks = this.splitList(rows, listSize);
            final CountDownLatch latch = new CountDownLatch(chunks.size());
            for (Map.Entry<String, List> entry : chunks.entrySet()) {
                Thread thread = new Thread(() -> {
                    // SimpleDateFormat is not thread-safe: one instance per
                    // worker, reused for every row in the chunk.
                    SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
                    try {
                        for (Object obj : entry.getValue()) {
                            Map map = (Map) obj;
                            // Example mapping with three columns.
                            TableNew tableNew = new TableNew();
                            tableNew.setTableNewUuid((String) map.get("TABLE_UUID"));
                            tableNew.setTableNewItemuuid((String) map.get("TABLE_ITEMUUID"));
                            String tableCdate = (String) map.get("TABLE_CDATE");
                            if (tableCdate != null && !"".equals(tableCdate)) {
                                tableNew.setTableNewCdate(fmt.parse(tableCdate));
                            } else {
                                tableNew.setTableNewCdate(null);
                            }
                            tableNewDao.save(tableNew); // copy old row into the new table
                        }
                    } catch (Exception throwables) {
                        throwables.printStackTrace();
                        System.out.println("数据操作失败");
                    } finally {
                        // Always count down — success or failure — so the
                        // await() below can never hang on a failed worker.
                        latch.countDown();
                    }
                });
                ThreadUtil.execute(thread);
            }

            // Block until every worker has finished its chunk.
            latch.await();
            long endTimes = System.currentTimeMillis();
            System.out.println("所有数据推送执行完毕:" + (endTimes - start));
        }
    }
    long end = System.currentTimeMillis();
    System.out.println("完成同步,耗时:" + (end - start) + "毫秒");
}
public Map<String, List> splitList(List list, Integer num) { //list总条数(以10000条数据和20个线程为例子=500条/份),每份500 int listSize = list.size(); //list 10000 HashMap<String, List> stringListHashMap = new HashMap<String, List>(); //用户封装返回的多个list List stringlist = new ArrayList<>(); for (int i = 0; i < listSize; i++) { //for循环依次放入每个list中 stringlist.add(list.get(i)); //先将string对象放入list,以防止最后一个没有放入 if (((i + 1) % num == 0) || (i + 1 == listSize)) { //如果l+1 除以 要分的份数 为整除,或者是最后一份,为结束循环.那就算作一份list, stringListHashMap.put("stringList" + i, stringlist); //将这一份放入Map中. stringlist = new ArrayList<>(); //新建一个list,用于继续存储对象 } } return stringListHashMap; //将map返回 }标签:count,Map,java,int,数据库,list,线程,new,多线程 From: https://blog.csdn.net/Q_L_D_X_K/article/details/140692633