Requirements
A customer needs a MySQL database backed up, but the data volume is too large for a full dump of everything: important data must be backed up in full, while less important data only gets an incremental backup. The approach taken here is to scan the information_schema tables; if a table is too large, only its newest n rows are backed up, otherwise the whole table is dumped. The program generates a mysqldump script, which is then executed to perform the actual backup.
Checking table sizes
# table_schema is the database name
SELECT CONCAT(table_schema, '.', table_name) AS 'Table Name',
       table_rows AS 'Number of Rows',
       CONCAT(ROUND(data_length / (1024 * 1024 * 1024), 4), 'G') AS 'Data Size',
       CONCAT(ROUND(index_length / (1024 * 1024 * 1024), 4), 'G') AS 'Index Size',
       CONCAT(ROUND((data_length + index_length) / (1024 * 1024 * 1024), 4), 'G') AS 'Total'
FROM information_schema.TABLES
WHERE table_schema LIKE 'zx'
ORDER BY table_name DESC;
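The generator below reads this query's results from Java through JFinal; for trying the query standalone, here is a minimal JDBC sketch (not part of the original program) that prints each table's name, approximate row count, and total size. The class name is made up, the host/credentials are the placeholder values used throughout this article, and MySQL Connector/J is assumed to be on the classpath.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class TableSizePrinter {
    public static void main(String[] args) throws Exception {
        // placeholder host/port/credentials, matching the examples in this article
        String url = "jdbc:mysql://192.168.xx.xx:3307/information_schema?useSSL=false";
        try (Connection conn = DriverManager.getConnection(url, "xx", "xx");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery(
                     "SELECT CONCAT(table_schema, '.', table_name) AS name, " +
                     "table_rows AS row_count, " +
                     "ROUND((data_length + index_length) / (1024 * 1024 * 1024), 4) AS total_gb " +
                     "FROM information_schema.TABLES WHERE table_schema = 'zx'")) {
            while (rs.next()) {
                // note: table_rows is only an estimate for InnoDB tables
                System.out.printf("%s rows=%d total=%sG%n",
                        rs.getString("name"), rs.getLong("row_count"), rs.getBigDecimal("total_gb"));
            }
        }
    }
}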
Checking the auto-increment column
# table_schema is the database name, table_name is the table name
SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = 'zx' AND table_name = 'anlian_host_result' AND extra = 'auto_increment';
# To list all columns of the table instead:
SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = 'zx' AND table_name = 'anlian_host_result';
mysqldump commands
Incremental backup based on the auto-increment id. mysqldump appends the value of -w/--where to the SELECT it issues for the table, so a predicate such as "1=1 order by auto_id desc limit 100" smuggles an ORDER BY and LIMIT into the dump and keeps only the newest 100 rows.
mysqldump -h 192.168.xx.xx -P3307 -u xx -pxx zx site_result --tables -w"1=1 order by auto_id desc limit 100" --default-character-set=utf8 --skip-lock-tables > ./zx/site_result.sql
Full backup
mysqldump -h 192.168.xx.xx -P3307 -u xx -pxx zx site_result --default-character-set=utf8 --skip-lock-tables > ./site_result.sql
Launch script
#!/bin/sh
source /etc/profile
# remember the directory the script was launched from
path=`pwd`/
# work relative to the directory above the one containing this script
cd `dirname $0` && cd ../
# run the generator (args: db name, row threshold, launch path), then execute
# the generated dump script from today's dump_YYYY_MM_DD directory in the background
nohup java -cp ./year_counter_2021.jar:lib/* com.isi.dbsyn.DumpCreater test 100 $path && cd dump_`date +"%Y_%m_%d"` && `nohup bash ./test_dump.sh >/dev/null &` &
Main code
import com.jfinal.plugin.activerecord.Record;
import java.io.File;
import java.util.ArrayList;
import java.util.List;

// JfinalDbUtil and MyIOUtils used below are project-local helpers (not shown here)
public class DumpCreater {
    private String ip;
    private int port;
    private String uname;
    private String pwd;
    private String db_name;
    private String dump_sh_dir;
    private long dump_max_size; // row-count threshold for switching to incremental dumps

    // size statistics for every table in the target schema
    private static final String table_schema_tmp = "SELECT table_schema, table_name, CONCAT(table_schema, '.', table_name) AS 'Table Name', table_rows AS 'Number of Rows', CONCAT(ROUND(data_length / (1024 * 1024 * 1024), 4), 'G') AS 'Data Size', CONCAT(ROUND(index_length / (1024 * 1024 * 1024), 4), 'G') AS 'Index Size', CONCAT(ROUND((data_length + index_length) / (1024 * 1024 * 1024), 4), 'G') AS 'Total' FROM information_schema.TABLES WHERE table_schema LIKE '[db]' ORDER BY table_name DESC";
    // the auto-increment column of a table, if any
    private static final String id_column_schema_tmp = "SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = '[db]' AND table_name = '[table]' AND extra = 'auto_increment'";
    // incremental dump: only the newest [dump_max_size] rows, ordered by the auto-increment id
    private String part_dump_sh_tmp = "mysqldump -h [ip] -P[port] -u [uname] -p[pwd] [db] [table] --tables -w\"1=1 order by [id] desc limit [dump_max_size]\" --default-character-set=utf8 --skip-lock-tables > [out_file]";
    // full dump of the whole table
    private String total_dump_sh_tmp = "mysqldump -h [ip] -P[port] -u [uname] -p[pwd] [db] [table] --default-character-set=utf8 --skip-lock-tables > [out_file]";
    // list every table in the target schema together with its size statistics
    private List<Record> getTables() {
        return JfinalDbUtil.getDb(JfinalDbUtil.source_name).find(table_schema_tmp.replace("[db]", db_name));
    }

    // return the auto-increment column name of the table, or null if it has none
    private String getIdColumn(String db, String table) {
        Record idColumn = JfinalDbUtil.getDb(JfinalDbUtil.source_name)
                .findFirst(id_column_schema_tmp.replace("[db]", db).replace("[table]", table));
        if (idColumn == null) {
            return null;
        }
        return idColumn.getStr("column_name");
    }
    // render one mysqldump command; total=true yields a full dump, otherwise an incremental one
    private String getDump(String table, Long dump_max_size, String idColumn, String out_file, boolean total) {
        if (total) {
            return total_dump_sh_tmp
                    .replace("[ip]", ip)
                    .replace("[port]", port + "")
                    .replace("[uname]", uname)
                    .replace("[pwd]", pwd)
                    .replace("[db]", db_name)
                    .replace("[table]", table)
                    .replace("[out_file]", out_file);
        }
        return part_dump_sh_tmp
                .replace("[ip]", ip)
                .replace("[port]", port + "")
                .replace("[uname]", uname)
                .replace("[pwd]", pwd)
                .replace("[db]", db_name)
                .replace("[table]", table)
                .replace("[id]", idColumn)
                .replace("[dump_max_size]", dump_max_size + "")
                .replace("[out_file]", out_file);
    }
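    // As a quick sanity check: with the fields set to the placeholder connection
    // details from the command examples above, a call such as
    // getDump("site_result", 100L, "auto_id", "./zx/site_result.sql", false)
    // renders exactly the incremental mysqldump line shown earlier.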
    // build one dump command per table: incremental when the table is over the
    // threshold and has an auto-increment id, full otherwise
    private List<String> getDumpShs() {
        List<String> dumpShs = new ArrayList<>();
        List<Record> tables = getTables();
        for (Record table : tables) {
            Integer rows = table.getInt("Number of Rows");
            String table_name = table.getStr("table_name");
            String idColumn = getIdColumn(db_name, table_name);
            File dbDirFile = new File(dump_sh_dir + "/" + db_name);
            if (!dbDirFile.exists()) {
                dbDirFile.mkdirs();
            }
            String out_path = "./" + db_name + "/" + table_name + ".sql";
            if (rows != null && rows > dump_max_size && idColumn != null) {
                // over the threshold and has an auto-increment id column: incremental backup
                dumpShs.add(getDump(table_name, dump_max_size, idColumn, out_path, false));
            } else {
                // full backup
                dumpShs.add(getDump(table_name, dump_max_size, idColumn, out_path, true));
            }
        }
        return dumpShs;
    }
    // write all generated commands into <dump_sh_dir>/<db_name>_dump.sh
    public void exe() {
        List<String> dumpShs = getDumpShs();
        String shell_path = dump_sh_dir + File.separator + db_name + "_dump.sh";
        File dump_sh_file = new File(shell_path);
        // drop any script left over from a previous run, since lines are appended
        if (dump_sh_file.exists()) {
            dump_sh_file.delete();
        }
        // make sure the target directory exists before writing
        File pFile = dump_sh_file.getParentFile();
        if (!pFile.exists()) {
            pFile.mkdirs();
        }
        for (String dumpSh : dumpShs) {
            MyIOUtils.appendLine(shell_path, dumpSh, "utf-8");
        }
    }
    public DumpCreater(String ip, int port, String uname, String pwd, String db_name, String dump_sh_dir, long dump_max_size) {
        this.ip = ip;
        this.port = port;
        this.uname = uname;
        this.pwd = pwd;
        this.db_name = db_name;
        this.dump_sh_dir = dump_sh_dir;
        this.dump_max_size = dump_max_size;
    }
}
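The launch script above invokes com.isi.dbsyn.DumpCreater with three arguments (db name, row threshold, launch path), but the entry point itself is not shown. A minimal sketch of what such a main method might look like, to live inside DumpCreater: the host, port, and credentials are placeholders copied from the command examples (assumptions, not the original code), and the dated output directory is inferred from the dump_YYYY_MM_DD directory the launch script changes into.

// Hypothetical entry point matching the launch script's "test 100 $path" arguments;
// connection details and directory layout are assumptions, not the original code.
public static void main(String[] args) {
    String dbName = args[0];                  // e.g. "test"
    long threshold = Long.parseLong(args[1]); // e.g. 100
    String dateSuffix = new java.text.SimpleDateFormat("yyyy_MM_dd").format(new java.util.Date());
    String outDir = args[2] + "dump_" + dateSuffix;
    new DumpCreater("192.168.xx.xx", 3307, "xx", "xx", dbName, outDir, threshold).exe();
}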