fixed transactions
parent ccc41e7023 · commit 7a9b264170
2 changed files with 264 additions and 1225 deletions
File diff suppressed because it is too large

@@ -5,6 +5,8 @@ Database export tool for migration.
 This script exports all data from ALL PowerOn PostgreSQL databases
 into a JSON file that can be used as a migration dataset.
+In addition, a separate JSON file containing only the structures (without data)
+is created: <filename>_structure.json
 
 Databases:
 - poweron_app (User, Mandate, RBAC, Features, etc.)

@@ -18,6 +20,7 @@ Usage:
 Options:
 --output, -o        Path to the output file (default: migration_export_<timestamp>.json)
+                    The structure file is automatically created as <filename>_structure.json
 --pretty, -p        Pretty-print the JSON output (for better readability)
 --exclude           Comma-separated list of tables to exclude
 --include-meta      Keep system metadata (_createdAt, _modifiedAt, etc.)
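
The argument parsing itself is not part of this diff. As a rough sketch only, here is how these options would typically map to argparse (all names and defaults are inferred from the help text above, not confirmed by the source):

import argparse

parser = argparse.ArgumentParser(description="Database export tool for migration")
parser.add_argument("--output", "-o", default=None,
                    help="path to the output file (default: migration_export_<timestamp>.json)")
parser.add_argument("--pretty", "-p", action="store_true",
                    help="pretty-print the JSON output for better readability")
parser.add_argument("--exclude", default="",
                    help="comma-separated list of tables to exclude")
parser.add_argument("--include-meta", action="store_true",
                    help="keep system metadata (_createdAt, _modifiedAt, etc.)")
args = parser.parse_args()

# split the comma-separated exclude list into table names, dropping empties
excludeTables = [t.strip() for t in args.exclude.split(",") if t.strip()]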

@@ -150,6 +153,8 @@ def _getDbConnection(dbDatabase: str):
             password=dbPassword,
             cursor_factory=psycopg2.extras.RealDictCursor
         )
+        # Autocommit must be set BEFORE set_client_encoding to avoid transaction conflicts
+        conn.autocommit = True
         conn.set_client_encoding('UTF8')
         return conn
     except Exception as e:
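
Why the ordering matters: in psycopg2, set_client_encoding() sends a SET statement to the server, and outside autocommit mode that implicitly opens a transaction; assigning conn.autocommit afterwards then fails, because session settings cannot be changed inside an open transaction. A minimal self-contained sketch of the fixed helper (connection parameters are placeholders, not the script's real configuration):

import psycopg2
import psycopg2.extras

def getConnection(database: str):
    conn = psycopg2.connect(
        host="localhost",        # placeholder
        dbname=database,
        user="poweron",          # placeholder
        password="secret",       # placeholder
        cursor_factory=psycopg2.extras.RealDictCursor
    )
    # autocommit first: set_client_encoding() issues a SET statement, which
    # would otherwise open a transaction and make this assignment raise a
    # ProgrammingError ("set_session cannot be used inside a transaction")
    conn.autocommit = True
    conn.set_client_encoding('UTF8')
    return conn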

@@ -205,6 +210,222 @@ def _getTableRowCount(conn, tableName: str) -> int:
     return result["count"] if result else 0
 
 
+def _getTableStructure(conn, tableName: str) -> Dict[str, Any]:
+    """Fetches the structure of a table (columns, constraints, indexes) without data."""
+    structure = {
+        "columns": [],
+        "primaryKeys": [],
+        "foreignKeys": [],
+        "uniqueConstraints": [],
+        "indexes": [],
+        "checkConstraints": []
+    }
+
+    # The connection already has autocommit = True, so no transaction problems here
+    with conn.cursor() as cursor:
+        # Column information
+        cursor.execute("""
+            SELECT
+                column_name,
+                data_type,
+                character_maximum_length,
+                numeric_precision,
+                numeric_scale,
+                is_nullable,
+                column_default,
+                udt_name
+            FROM information_schema.columns
+            WHERE table_schema = 'public'
+            AND table_name = %s
+            ORDER BY ordinal_position
+        """, (tableName,))
+
+        for row in cursor.fetchall():
+            colInfo = {
+                "name": row["column_name"],
+                "type": row["data_type"],
+                "udtName": row["udt_name"],
+                "nullable": row["is_nullable"] == "YES",
+                "default": row["column_default"]
+            }
+
+            if row["character_maximum_length"]:
+                colInfo["maxLength"] = row["character_maximum_length"]
+            if row["numeric_precision"]:
+                colInfo["precision"] = row["numeric_precision"]
+            if row["numeric_scale"]:
+                colInfo["scale"] = row["numeric_scale"]
+
+            structure["columns"].append(colInfo)
+
+        # Primary keys
+        cursor.execute("""
+            SELECT
+                kcu.column_name
+            FROM information_schema.table_constraints tc
+            JOIN information_schema.key_column_usage kcu
+                ON tc.constraint_name = kcu.constraint_name
+            WHERE tc.table_schema = 'public'
+            AND tc.table_name = %s
+            AND tc.constraint_type = 'PRIMARY KEY'
+            ORDER BY kcu.ordinal_position
+        """, (tableName,))
+
+        structure["primaryKeys"] = [row["column_name"] for row in cursor.fetchall()]
+
+        # Foreign keys
+        cursor.execute("""
+            SELECT
+                kcu.column_name,
+                ccu.table_name AS foreign_table_name,
+                ccu.column_name AS foreign_column_name,
+                tc.constraint_name
+            FROM information_schema.table_constraints AS tc
+            JOIN information_schema.key_column_usage AS kcu
+                ON tc.constraint_name = kcu.constraint_name
+            JOIN information_schema.constraint_column_usage AS ccu
+                ON ccu.constraint_name = tc.constraint_name
+            WHERE tc.constraint_type = 'FOREIGN KEY'
+            AND tc.table_schema = 'public'
+            AND tc.table_name = %s
+        """, (tableName,))
+
+        for row in cursor.fetchall():
+            structure["foreignKeys"].append({
+                "column": row["column_name"],
+                "referencesTable": row["foreign_table_name"],
+                "referencesColumn": row["foreign_column_name"],
+                "constraintName": row["constraint_name"]
+            })
+
+        # Unique constraints - FIX: use table aliases to avoid ambiguous columns
+        cursor.execute("""
+            SELECT
+                kcu.column_name,
+                tc.constraint_name
+            FROM information_schema.table_constraints tc
+            JOIN information_schema.key_column_usage kcu
+                ON tc.constraint_name = kcu.constraint_name
+                AND tc.table_schema = kcu.table_schema
+                AND tc.table_name = kcu.table_name
+            WHERE tc.table_schema = 'public'
+            AND tc.table_name = %s
+            AND tc.constraint_type = 'UNIQUE'
+            ORDER BY kcu.ordinal_position
+        """, (tableName,))
+
+        uniqueGroups = {}
+        for row in cursor.fetchall():
+            constraintName = row["constraint_name"]
+            if constraintName not in uniqueGroups:
+                uniqueGroups[constraintName] = []
+            uniqueGroups[constraintName].append(row["column_name"])
+
+        structure["uniqueConstraints"] = [
+            {"columns": cols, "constraintName": name}
+            for name, cols in uniqueGroups.items()
+        ]
+
+        # Indexes (without primary key and unique constraints)
+        cursor.execute("""
+            SELECT
+                i.relname AS index_name,
+                a.attname AS column_name,
+                ix.indisunique AS is_unique
+            FROM pg_class t
+            JOIN pg_index ix ON t.oid = ix.indrelid
+            JOIN pg_class i ON i.oid = ix.indexrelid
+            JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ANY(ix.indkey)
+            WHERE t.relkind = 'r'
+            AND t.relname = %s
+            AND NOT ix.indisprimary
+            ORDER BY i.relname, a.attnum
+        """, (tableName,))
+
+        indexGroups = {}
+        for row in cursor.fetchall():
+            indexName = row["index_name"]
+            if indexName not in indexGroups:
+                indexGroups[indexName] = {
+                    "name": indexName,
+                    "columns": [],
+                    "unique": row["is_unique"]
+                }
+            indexGroups[indexName]["columns"].append(row["column_name"])
+
+        structure["indexes"] = list(indexGroups.values())
+
+        # Check constraints - FIX: use table aliases
+        cursor.execute("""
+            SELECT
+                cc.constraint_name,
+                cc.check_clause
+            FROM information_schema.check_constraints cc
+            JOIN information_schema.constraint_column_usage ccu
+                ON cc.constraint_name = ccu.constraint_name
+            WHERE ccu.table_schema = 'public'
+            AND ccu.table_name = %s
+        """, (tableName,))
+
+        for row in cursor.fetchall():
+            structure["checkConstraints"].append({
+                "constraintName": row["constraint_name"],
+                "checkClause": row["check_clause"]
+            })
+
+    return structure
+
+
+def _exportSingleDatabaseStructure(
+    dbDatabase: str,
+    excludeTables: List[str]
+) -> Optional[Dict[str, Any]]:
+    """Exports only the structure of a single database (without data)."""
+    conn = _getDbConnection(dbDatabase)
+
+    if conn is None:
+        return None
+
+    try:
+        allTables = _getTables(conn)
+
+        # Exclude system tables
+        systemTables = ["_system"]
+        tablesToExport = [
+            t for t in allTables
+            if t not in systemTables and t not in excludeTables
+        ]
+
+        dbExport = {
+            "tables": {},
+            "summary": {},
+            "tableCount": len(tablesToExport)
+        }
+
+        for tableName in tablesToExport:
+            try:
+                structure = _getTableStructure(conn, tableName)
+                dbExport["tables"][tableName] = structure
+                dbExport["summary"][tableName] = {
+                    "columnCount": len(structure["columns"]),
+                    "primaryKeyCount": len(structure["primaryKeys"]),
+                    "foreignKeyCount": len(structure["foreignKeys"]),
+                    "indexCount": len(structure["indexes"])
+                }
+
+                logger.info(f"  {tableName}: {len(structure['columns'])} columns")
+
+            except Exception as e:
+                logger.error(f"  Error in table {tableName}: {e}")
+                dbExport["tables"][tableName] = {}
+                dbExport["summary"][tableName] = {"error": str(e)}
+
+        return dbExport
+
+    finally:
+        conn.close()
+
+
 def _exportSingleDatabase(
     dbDatabase: str,
     excludeTables: List[str],
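
For orientation, here is the shape of what _getTableStructure() returns for a single table. The record below is invented for a hypothetical "user" table (all names and values are illustrative; only the keys are taken from the code above):

exampleStructure = {
    "columns": [
        {"name": "id", "type": "uuid", "udtName": "uuid",
         "nullable": False, "default": "gen_random_uuid()"},
        {"name": "email", "type": "character varying", "udtName": "varchar",
         "nullable": False, "default": None, "maxLength": 255},
    ],
    "primaryKeys": ["id"],
    "foreignKeys": [
        {"column": "mandateId", "referencesTable": "mandate",
         "referencesColumn": "id", "constraintName": "user_mandateId_fkey"},
    ],
    "uniqueConstraints": [
        {"columns": ["email"], "constraintName": "user_email_key"},
    ],
    "indexes": [
        {"name": "user_email_idx", "columns": ["email"], "unique": True},
    ],
    "checkConstraints": [],
}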

@@ -249,6 +470,7 @@ def _exportSingleDatabase(
             logger.error(f"  Error in table {tableName}: {e}")
             dbExport["tables"][tableName] = []
             dbExport["summary"][tableName] = {"recordCount": 0, "error": str(e)}
+            # With autocommit = True no rollback() is necessary
 
         return dbExport
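
A small self-contained illustration (table name and helper are placeholders from the sketch above) of why the except branch no longer needs conn.rollback(): with autocommit, every statement runs in its own implicit transaction, so one failed query does not leave the connection in an aborted-transaction state.

conn = getConnection("poweron_app")  # connection helper sketched earlier
with conn.cursor() as cursor:
    try:
        cursor.execute('SELECT * FROM "doesNotExist"')   # fails
    except Exception as e:
        print(f"query failed: {e}")
    # without autocommit the next statement would fail with
    # "current transaction is aborted"; with autocommit it runs fine
    cursor.execute("SELECT 1 AS ok")
    print(cursor.fetchone()["ok"])
conn.close()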

@@ -265,6 +487,7 @@ def exportDatabase(
 ) -> str:
     """
     Exports all databases into a JSON file.
+    Additionally creates a separate JSON file containing only the structures (without data).
 
     Args:
         outputPath: Path to the output file (optional)

@@ -310,12 +533,31 @@ def exportDatabase(
         "databases": {}
     }
 
+    # Create the structure export
+    structureData = {
+        "meta": {
+            "exportedAt": datetime.utcnow().isoformat() + "Z",
+            "exportedFrom": _getConfigValue("APP_ENV_LABEL", "unknown"),
+            "version": "1.0",
+            "databaseCount": 0,
+            "totalTables": 0,
+            "excludedTables": excludeTables,
+            "note": "Structures only, no data"
+        },
+        "databases": {}
+    }
+
     # Export each database
     for dbName in databasesToExport:
         logger.info(f"Exporting database: {dbName}")
 
+        # Export data
         dbExport = _exportSingleDatabase(dbName, excludeTables, includeMeta)
 
+        # Export structure
+        logger.info(f"Exporting structure for database: {dbName}")
+        dbStructure = _exportSingleDatabaseStructure(dbName, excludeTables)
+
         if dbExport is not None:
             exportData["databases"][dbName] = dbExport
             exportData["meta"]["databaseCount"] += 1

@@ -324,8 +566,13 @@ def exportDatabase(
             logger.info(f"  -> {dbExport['tableCount']} tables, {dbExport['totalRecords']} records")
         else:
             logger.info(f"  -> Skipped (does not exist)")
 
+        if dbStructure is not None:
+            structureData["databases"][dbName] = dbStructure
+            structureData["meta"]["databaseCount"] += 1
+            structureData["meta"]["totalTables"] += dbStructure["tableCount"]
+
-    # Write the JSON file
+    # Write the JSON file with data
     logger.info(f"Writing export file: {outputPath}")
 
     with open(outputPath, "w", encoding="utf-8") as f:

@@ -334,16 +581,29 @@ def exportDatabase(
         else:
             json.dump(exportData, f, ensure_ascii=False, default=str)
 
-    # Compute the file size
+    # Write the JSON file with structures
+    structurePath = outputPath.replace(".json", "_structure.json")
+    logger.info(f"Writing structure export file: {structurePath}")
+
+    with open(structurePath, "w", encoding="utf-8") as f:
+        if prettyPrint:
+            json.dump(structureData, f, indent=2, ensure_ascii=False, default=str)
+        else:
+            json.dump(structureData, f, ensure_ascii=False, default=str)
+
+    # Compute the file sizes
     fileSize = os.path.getsize(outputPath)
     fileSizeStr = _formatFileSize(fileSize)
+
+    structureFileSize = os.path.getsize(structurePath)
+    structureFileSizeStr = _formatFileSize(structureFileSize)
 
     logger.info(f"Export finished!")
     logger.info(f"  Databases: {exportData['meta']['databaseCount']}")
     logger.info(f"  Tables: {exportData['meta']['totalTables']}")
     logger.info(f"  Records: {exportData['meta']['totalRecords']}")
-    logger.info(f"  File size: {fileSizeStr}")
-    logger.info(f"  Output file: {outputPath}")
+    logger.info(f"  Data export: {fileSizeStr} - {outputPath}")
+    logger.info(f"  Structure export: {structureFileSizeStr} - {structurePath}")
 
     return outputPath
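
Taken together, a hypothetical call to the updated exporter and a read-back of both output files (exportDatabase's full signature is only partially visible in this diff, so the keyword arguments below are inferred from the hunks above, not confirmed):

import json

outputPath = exportDatabase(
    outputPath="migration_export.json",
    excludeTables=["auditLog"],   # assumed example table name
    includeMeta=False,
    prettyPrint=True,
)

with open(outputPath, encoding="utf-8") as f:
    data = json.load(f)
with open(outputPath.replace(".json", "_structure.json"), encoding="utf-8") as f:
    structure = json.load(f)

print(data["meta"]["databaseCount"], structure["meta"]["totalTables"])

One design note: outputPath.replace(".json", "_structure.json") substitutes every occurrence of ".json" in the path, which is fine for the default migration_export_<timestamp>.json names but worth keeping in mind for unusual paths.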