
jpa - Backing up a database using JPA (design)


I have working code that doesn't always work. This is my approach:

Create backup

  • Create an entity manager for the source database
  • Create an entity manager for the destination database (an embedded Derby database)
  • Copy the entities (select all entries of a table, in a hardcoded table order for now, and copy them to the destination database; basically select all, detach the entities from the source, and persist them in the destination)
  • Zip the embedded Derby database

Load from backup

  • Unzip the backup
  • Perform a backup (as a restore point for the current database)
  • Clean the destination database (delete all tables)
  • Copy the entities

At some point I'll use the JPA 2 metadata to get the tables to copy and to pick the order in which they need to be copied (due to constraints); a metamodel sketch follows below.
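
A minimal sketch (hypothetical, not part of the original code) of how the JPA 2 Metamodel exposes the managed entities; deriving a constraint-safe copy order would still require walking each entity's associations:

    //Hypothetical sketch, not from the Xinco code base: list every mapped
    //entity via the standard JPA 2 Metamodel API.
    import java.util.ArrayList;
    import java.util.List;
    import javax.persistence.EntityManagerFactory;
    import javax.persistence.metamodel.EntityType;

    public class EntityLister {

        //Returns the name of every entity known to the persistence unit.
        public static List<String> listEntityNames(EntityManagerFactory emf) {
            List<String> names = new ArrayList<String>();
            for (EntityType<?> type : emf.getMetamodel().getEntities()) {
                //type.getName() is the entity name, e.g. "XincoCoreUser"
                names.add(type.getName());
            }
            return names;
        }
    }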

For some reason this approach doesn't always work: I see "missing" entries that were not restored.

Here is the code:
    package com.bluecubs.xinco.core.server;

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.text.SimpleDateFormat;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.Date;
    import java.util.HashMap;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.logging.Level;
    import java.util.logging.Logger;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipInputStream;
    import java.util.zip.ZipOutputStream;
    import javax.persistence.EntityManager;
    import javax.persistence.EntityManagerFactory;
    import javax.persistence.Persistence;
    import org.apache.commons.io.FileUtils;
    import org.apache.commons.io.filefilter.IOFileFilter;
    import org.apache.commons.io.filefilter.TrueFileFilter;

    /**
     * This is a complex task and is heavily dependent on the architecture
     * of the database.
     *
     * Data needs to be stored in a particular order into the database to comply
     * with database constraints. This order can be observed in a dump file or
     * create script like the ones generated from MySQL Workbench. Using that
     * should be enough. In case that tool is not available, the basic logic is
     * to populate tables from the outside inwards: from the tables with no
     * relationships (or only one) towards the more complex ones. In summary,
     * before a table is populated, all the tables it relates to should already
     * be populated (if we have identifying relationships).
     *
     * @author Javier A. Ortiz Bultrón <javier.ortiz.78@gmail.com>
     */
    public class XincoBackupManager {

        private static XincoBackupManager instance;
        private static EntityManagerFactory liveEMF;
        private static EntityManagerFactory backupEMF;
        private static EntityManager live, backup;
        private static final ArrayList<String> tables = new ArrayList<String>();
        private static XincoBackupFile last;
        private static String backupPath;
        public static HashMap<String, Integer> stats = new HashMap<String, Integer>();

        static {
            //Non-order-critical tables
            tables.add("XincoCoreAceT");
            tables.add("XincoCoreDataT");
            tables.add("XincoCoreDataTypeAttributeT");
            tables.add("XincoCoreGroupT");
            tables.add("XincoCoreLanguageT");
            tables.add("XincoCoreNodeT");
            tables.add("XincoCoreUserHasXincoCoreGroupT");
            tables.add("XincoCoreUserT");
            tables.add("XincoSettingT");
            tables.add("XincoDependencyTypeT");
            tables.add("XincoCoreDataHasDependencyT");
            tables.add("XincoSetting");
            tables.add("XincoId");
            //Order-critical tables
            tables.add("XincoCoreLanguage");
            tables.add("XincoCoreNode");
            tables.add("XincoCoreDataType");
            tables.add("XincoCoreData");
            tables.add("XincoDependencyType");
            tables.add("XincoCoreDataHasDependency");
            tables.add("XincoCoreUser");
            tables.add("XincoCoreUserModifiedRecord");
            tables.add("XincoCoreGroup");
            tables.add("XincoCoreAce");
            tables.add("XincoCoreUserHasXincoCoreGroup");
            tables.add("XincoAddAttribute");
            tables.add("XincoCoreDataTypeAttribute");
            tables.add("XincoCoreLog");
        }

        public static XincoBackupManager get() {
            if (instance == null) {
                instance = new XincoBackupManager();
            }
            return instance;
        }

        private static void setDBSystemDir(String systemDir) {
            //Set the db system directory.
            System.setProperty("derby.system.home", systemDir);
            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                    "Derby home set at: {0}", systemDir);
            try {
                //Start the embedded DB
                Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance();
            } catch (ClassNotFoundException ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
            } catch (InstantiationException ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
            } catch (IllegalAccessException ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
            }
        }

        private static void initConnections() {
            try {
                liveEMF = XincoDBManager.getEntityManagerFactory();
            } catch (XincoException ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
            }
            try {
                backupEMF = Persistence.createEntityManagerFactory("XincoBackup");
            } catch (Exception ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
            }
        }

        protected static boolean backup() throws XincoException {
            try {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Initializing connections...");
                initConnections();
                stats.clear();
                backupPath = XincoSettingServer.getSetting("setting.backup.path").getString_value();
                //We need to make sure that there's no one in the database
                XincoDBManager.setLocked(true);
                live = liveEMF.createEntityManager();
                //Prepare the backup repository. Create dirs if needed.
                File backupDir = new File(backupPath);
                backupDir.mkdirs();
                //Create a folder for this backup
                SimpleDateFormat format = new SimpleDateFormat("MM-dd-yyyy");
                File backupNewDir = new File(backupPath + System.getProperty("file.separator")
                        + format.format(new Date()));
                backupNewDir.mkdirs();
                /*
                 * Make sure there's no Derby database stuff in the folder.
                 * A previously interrupted backup might have left corrupted
                 * database files behind.
                 */
                File tempDir = new File(backupNewDir.getAbsolutePath()
                        + System.getProperty("file.separator") + "xinco");
                if (tempDir.exists()) {
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.WARNING,
                            "Deleting potentially corrupted database files at: {0}", tempDir);
                    FileUtils.deleteDirectory(tempDir);
                    //Delete the Derby log file
                    FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
                            + System.getProperty("file.separator") + "derby.log"));
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
                            "Done!");
                }
                //Prepare the system to use Derby
                setDBSystemDir(backupNewDir.getAbsolutePath());
                backup = backupEMF.createEntityManager();
                for (String s : tables) {
                    copyEntities(s, live, backup);
                }
                /*
                 * At this point we should have a <Backup Database name> folder in
                 * <Backup Path>/<Date>. Let's zip it for storage.
                 */
                format = new SimpleDateFormat("MM dd yyyy hh-mm-ss");
                zipBackupFiles(backupNewDir, backupNewDir.getAbsolutePath()
                        + System.getProperty("file.separator") + "Xinco Backup " + format.format(new Date()));
                //Stop the Derby database so it can be deleted
                try {
                    DriverManager.getConnection("jdbc:derby:;shutdown=true");
                } catch (SQLException e) {
                    //When the database shuts down it'll throw an exception
                }
                //Delete the backed-up files
                String dbName = (String) backup.getProperties().get("javax.persistence.jdbc.url");
                dbName = dbName.substring(dbName.lastIndexOf(":") + 1, dbName.indexOf(";"));
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Deleting temp folder: {0}", dbName);
                FileUtils.deleteDirectory(new File(backupNewDir.getAbsolutePath()
                        + System.getProperty("file.separator") + dbName));
                //Delete the Derby log file
                FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
                        + System.getProperty("file.separator") + "derby.log"));
            } catch (XincoException ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
                XincoDBManager.setLocked(false);
                return false;
            } catch (Exception ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
                XincoDBManager.setLocked(false);
                return false;
            } finally {
                if (live != null && live.isOpen()) {
                    live.close();
                }
                if (backup != null && backup.isOpen()) {
                    backup.close();
                }
                if (backupEMF != null && backupEMF.isOpen()) {
                    backupEMF.close();
                }
            }
            XincoDBManager.setLocked(false);
            return true;
        }

        private static void zipBackupFiles(File path, String zipName) throws XincoException {
            if (!zipName.endsWith(".zip")) {
                zipName += ".zip";
            }
            //These are the files to include in the ZIP file
            IOFileFilter filter = new IOFileFilter() {

                @Override
                public boolean accept(File file) {
                    if (file.isDirectory()) {
                        return true;
                    }
                    //Ignore other backup files
                    if (file.isFile() && !file.getName().endsWith(".zip")) {
                        return true;
                    }
                    return false;
                }

                @Override
                public boolean accept(File file, String string) {
                    throw new UnsupportedOperationException("Not supported yet.");
                }
            };
            @SuppressWarnings("unchecked")
            Collection<File> fileList = FileUtils.listFiles(path, filter, TrueFileFilter.INSTANCE);
            Object[] files = fileList.toArray();

            //Create a buffer for reading the files
            byte[] buf = new byte[1024];

            try {
                //Create the ZIP file
                ZipOutputStream out = new ZipOutputStream(new FileOutputStream(zipName));

                //Compress the files
                for (int i = 0; i < files.length; i++) {
                    FileInputStream in = new FileInputStream((File) files[i]);
                    String fileName = ((File) files[i]).getPath();
                    //Strip the folder prefix that is not needed inside the archive
                    fileName = fileName.substring(fileName.indexOf(path.getAbsolutePath()) + path.getAbsolutePath().length() + 1);
                    //Add ZIP entry to output stream.
                    out.putNextEntry(new ZipEntry(fileName));

                    //Transfer bytes from the file to the ZIP file
                    int len;
                    while ((len = in.read(buf)) > 0) {
                        out.write(buf, 0, len);
                    }

                    //Complete the entry
                    out.closeEntry();
                    in.close();
                    last = new XincoBackupFile(new File(zipName));
                }
                //Complete the ZIP file
                out.close();
            } catch (IOException e) {
                throw new XincoException("Error zipping backup: " + e.getLocalizedMessage());
            }
        }

        private static void copyEntities(String table, EntityManager source, EntityManager dest) {
            List<Object> result, result2;
            result = source.createNamedQuery(table + ".findAll").getResultList();
            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
                    "Copying from table: {0}", table);
            int i = 0;
            //Detach all entities from the source so they can be persisted elsewhere
            source.clear();
            for (Object o : result) {
                i++;
                Class<?> persistenceClass = null;
                try {
                    persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
                    dest.getTransaction().begin();
                    if (dest.contains(persistenceClass.cast(o))) {
                        //It exists already, so do a merge
                        dest.merge(persistenceClass.cast(o));
                    } else {
                        dest.persist(persistenceClass.cast(o));
                    }
                    dest.getTransaction().commit();
                } catch (ClassNotFoundException ex) {
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
                    throw new XincoException("No persistence entity defined for table: " + table);
                } catch (Exception ex) {
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
                    throw new XincoException("Exception copying: " + o);
                }
            }
            stats.put(table, i);
            result2 = dest.createNamedQuery(table + ".findAll").getResultList();
            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
                    "Copying for table: {0} completed! Amount of records: {1}",
                    new Object[]{table, i});
            //Make sure the copy is accurate.
            //TODO: For some reason XincoId always returns twice the amount of records during this routine.
            if (result2.size() != result.size() && !table.equals("XincoId")) {
                throw new XincoException("Error copying records for table " + table + ". Got " + result2.size() + " instead of " + result.size());
            }
            result2.clear();
        }

        @SuppressWarnings({"unchecked"})
        public static ArrayList<XincoBackupFile> getBackupFiles() throws XincoException {
            //Only pick up backup ZIP files
            IOFileFilter filter = new IOFileFilter() {

                @Override
                public boolean accept(File file) {
                    //Only zip files
                    if (file.isFile() && file.getName().endsWith(".zip")
                            && file.getName().startsWith("Xinco Backup")) {
                        return true;
                    }
                    return false;
                }

                @Override
                public boolean accept(File file, String string) {
                    throw new UnsupportedOperationException("Not supported yet.");
                }
            };
            Collection<File> files = FileUtils.listFiles(
                    new File(backupPath), filter, TrueFileFilter.INSTANCE);
            ArrayList<XincoBackupFile> backupFiles = new ArrayList<XincoBackupFile>();
            for (File f : files) {
                backupFiles.add(new XincoBackupFile(f));
            }
            //Sort
            Collections.sort(backupFiles, new XincoBackupComparator());
            //Sorted from oldest to newest, so we need to invert the list.
            Collections.reverse(backupFiles);
            return backupFiles;
        }

        protected static boolean restoreFromBackup(XincoBackupFile backupFile) throws XincoException {
            try {
                stats.clear();
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Restoring database from: {0}", backupFile.getName());
                //First make a backup of the current database, just in case
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Creating a restore point for your current database...");
                backup();
                //We need to make sure that there's no one in the database
                XincoDBManager.setLocked(true);
                //Load database from the provided backup
                loadDatabaseFromBackup(backupFile);
                XincoDBManager.setLocked(false);
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Restore complete!");
                try {
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                            "Deleting restore point...");
                    FileUtils.forceDelete(last);
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                            "Done!");
                } catch (IOException ex) {
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
                }
                return true;
            } catch (XincoException ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
                //Recover from the last backup
                loadDatabaseFromBackup(getLast());
                XincoDBManager.setLocked(false);
                throw new XincoException("Unable to load backup! Database reverted to original state. \n" + ex.getMessage());
            }
        }

        protected static void loadDatabaseFromBackup(XincoBackupFile backupFile) throws XincoException {
            EntityManager backupEM = null;
            try {
                initConnections();
                live = liveEMF.createEntityManager();
                //Unzip backup
                unzipBackup(backupFile);
                //Delete current database contents (inverse order than when writing)
                Collections.reverse(tables);
                for (String s : tables) {
                    clearTable(s, live);
                }
                //Get back to the original order
                Collections.reverse(tables);
                //Make Derby start where the backup is
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Connecting to backup data...");
                setDBSystemDir(backupPath + "Temp"
                        + System.getProperty("file.separator"));
                //Connect to backup database
                backupEM = Persistence.createEntityManagerFactory("XincoBackup").createEntityManager();
                //Start copying
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Starting loading entities...");
                for (String s : tables) {
                    //Copy values from backup
                    copyEntities(s, backupEM, live);
                }
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Load complete!");
                //Stop the Derby database so the temp folder can be deleted
                DriverManager.getConnection("jdbc:derby:;shutdown=true");
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Delete temp folder!");
                try {
                    FileUtils.deleteDirectory(new File(System.getProperty("derby.system.home")));
                } catch (IOException ex) {
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
                }
            } catch (SQLException e) {
                //When the database shuts down it'll throw an exception
            } finally {
                if (live != null && live.isOpen()) {
                    live.close();
                }
                if (backupEM != null && backupEM.isOpen()) {
                    backupEM.close();
                }
            }
        }

        private static void unzipBackup(XincoBackupFile backup) {
            try {
                //Make sure that the temp directory is empty before unzipping
                FileUtils.deleteDirectory(new File(backupPath
                        + System.getProperty("file.separator") + "Temp"));
                byte[] buf = new byte[1024];
                ZipInputStream zipinputstream = new ZipInputStream(
                        new FileInputStream(backup.getBackupFile()));
                ZipEntry zipentry = zipinputstream.getNextEntry();
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Unzipping backup file: {0}", backup.getName());
                while (zipentry != null) {
                    //For each entry to be extracted
                    String entryName = zipentry.getName();
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                            "Extracting file: {0}", entryName);
                    int n;
                    FileOutputStream fileoutputstream;
                    File newFile = new File(entryName);
                    String directory = newFile.getParent();

                    if (directory == null) {
                        if (newFile.isDirectory()) {
                            break;
                        }
                    }
                    if (entryName.contains(System.getProperty("file.separator"))) {
                        //Create any internal folders required
                        new File(backupPath
                                + System.getProperty("file.separator") + "Temp"
                                + System.getProperty("file.separator") + entryName.substring(
                                0, entryName.lastIndexOf(
                                System.getProperty("file.separator")))).mkdirs();
                    } else {
                        File tempDir = new File(backupPath
                                + System.getProperty("file.separator") + "Temp"
                                + System.getProperty("file.separator"));
                        tempDir.mkdirs();
                    }
                    fileoutputstream = new FileOutputStream(backupPath
                            + System.getProperty("file.separator") + "Temp"
                            + System.getProperty("file.separator") + entryName);

                    while ((n = zipinputstream.read(buf, 0, 1024)) > -1) {
                        fileoutputstream.write(buf, 0, n);
                    }

                    fileoutputstream.close();
                    zipinputstream.closeEntry();
                    zipentry = zipinputstream.getNextEntry();
                } //while
                zipinputstream.close();
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Unzipping complete!");
            } catch (Exception e) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE,
                        "Error unzipping file!", e);
            }
        }

        private static void clearTable(String table, EntityManager target) throws XincoException {
            try {
                List<Object> result;
                result = target.createNamedQuery(table + ".findAll").getResultList();
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Cleaning table: {0}", table);
                int i = 0;
                Class<?> serverClass = null;
                boolean special = false;
                try {
                    serverClass = Class.forName("com.bluecubs.xinco.core.server." + table + "Server");
                    special = serverClass.newInstance() instanceof XincoCRUDSpecialCase;
                } catch (ClassNotFoundException ex) {
                    try {
                        //Class doesn't exist, try in the add folder
                        serverClass = Class.forName("com.bluecubs.xinco.add.server." + table + "Server");
                        special = serverClass.newInstance() instanceof XincoCRUDSpecialCase;
                    } catch (ClassNotFoundException ex1) {
                    } catch (InstantiationException ex1) {
                    } catch (NoClassDefFoundError ex1) {
                    }
                } catch (InstantiationException ex) {
                } catch (NoClassDefFoundError ex) {
                }
                if (serverClass != null && special) {
                    ((XincoCRUDSpecialCase) serverClass.newInstance()).clearTable();
                    special = false;
                } else {
                    for (Object o : result) {
                        i++;
                        try {
                            Class<?> persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
                            target.getTransaction().begin();
                            target.remove(persistenceClass.cast(o));
                            target.getTransaction().commit();
                        } catch (ClassNotFoundException ex) {
                            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
                        }
                    }
                }
                result = target.createNamedQuery(table + ".findAll").getResultList();
                if (!result.isEmpty()) {
                    throw new IllegalStateException("Unable to delete entities: " + result.size());
                }
                stats.put(table, i);
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                        "Cleaning table: {0} completed! Amount of records removed: {1}", new Object[]{table, i});
            } catch (IllegalAccessException ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
            } catch (InstantiationException ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
            }
        }

        /**
         * @return the last backup file
         */
        public static XincoBackupFile getLast() {
            return last;
        }
    }

    Any flaw in the design?
    A better way of doing it?
    Any comment is more than welcomed!

    Best Answer

    Any flaw in the design? A better way of doing it? Any comment is more than welcomed!



    Most database engines provide commands or tools that can dump the content of a given database (some of them even support incremental backups). JPA will only be less efficient and more complicated when you have ready-to-use solutions, so I don't see the point of using JPA for this task.
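
    For example (a hypothetical sketch, not from the original answer), a MySQL database could be dumped by shelling out to mysqldump from Java; the user, password, and output path below are illustrative placeholders:

    //Hypothetical sketch: invoke the native mysqldump tool from Java.
    //Credentials and paths are illustrative placeholders.
    import java.io.File;
    import java.io.IOException;

    public class NativeDump {

        public static void dump(String database, File outFile)
                throws IOException, InterruptedException {
            ProcessBuilder pb = new ProcessBuilder(
                    "mysqldump", "--user=backup_user", "--password=secret", database);
            //Send the SQL dump straight to the output file (Java 7+).
            pb.redirectOutput(outFile);
            Process p = pb.start();
            if (p.waitFor() != 0) {
                throw new IOException("mysqldump failed with exit code " + p.exitValue());
            }
        }
    }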

    For Derby, there is actually nothing special to do: just zip/tar (or rsync) the database files and you're done.
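
    Derby also ships a built-in online backup procedure, SYSCS_UTIL.SYSCS_BACKUP_DATABASE, which copies the database files to a target directory while keeping them consistent. A minimal JDBC sketch (the database name in the JDBC URL is an assumed example):

    //Minimal sketch of Derby's built-in online backup procedure.
    //The database name in the JDBC URL is an assumed example.
    import java.sql.CallableStatement;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class DerbyOnlineBackup {

        public static void backup(String backupDir) throws SQLException {
            Connection conn = DriverManager.getConnection("jdbc:derby:xinco");
            try {
                CallableStatement cs = conn.prepareCall(
                        "CALL SYSCS_UTIL.SYSCS_BACKUP_DATABASE(?)");
                cs.setString(1, backupDir);
                //Derby writes a consistent copy of the database under backupDir.
                cs.execute();
                cs.close();
            } finally {
                conn.close();
            }
        }
    }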

    If you need to copy the content of one database engine into another, use an ETL.

    See also
  • How i can Dump a derby database into an sql file?
  • This question and answer originally appeared on Stack Overflow: https://stackoverflow.com/questions/3259809/
