@@ -0,0 +1,200 @@
+#! /usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Helper script producing monthly account statements in CSV format
+# from overlapping CSV exports downloaded from the Crédit Coopératif
+# web interface.
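+#
+# Usage (see the __main__ block at the bottom of this file):
+#   <script> <dossier_des_exports_csv> [<dossier_de_sortie>]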
+
+import csv
+import hashlib
+import json
+import locale
+import os
+import sys
+from collections import OrderedDict
+from datetime import datetime
+
+# French month names are used when printing the processed date range.
+# Fall back to the system default if fr_FR.UTF-8 is not installed.
+try:
+    locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
+except locale.Error:
+    locale.setlocale(locale.LC_ALL, '')
+
+class CsvStatementParser(object):
+
+    def __init__(self):
+        self.lines = OrderedDict()
+        self.fieldnames = None
+        self.date_fieldname = "Date"
+        self.overlap_detector = {}
+        self.first_ops = {}
+        self.last_ops = {}
+        self.daterange = [datetime.now(), datetime.fromordinal(1)]
+        self.dups = dict()  # holds counters for duplicate lines
+        self.badmonths = set()  # months flagged by check_overlaps()
+
+
+    def parse(self, filename):
+        with open(filename, encoding='iso-8859-1') as csvfile:
+            reader = csv.DictReader(csvfile, delimiter=';')
+            if self.fieldnames is None:
+                # The first file parsed determines the column names
+                # expected in all subsequent files.
+                self.fieldnames = [k for k in reader.fieldnames if k != '']
+
+                # Also identify the first column whose name looks like
+                # a date: it will serve as the indexing key.
+                for fname in self.fieldnames:
+                    if "date" in fname.lower():
+                        self.date_fieldname = fname
+                        break
+
+            if self.fieldnames != [k for k in reader.fieldnames if k != '']:
+                print("""Fichier ignoré : %s. Cause : les noms de colonnes ne sont pas ceux attendus.
+    Trouvé : %s
+    Attendu : %s
+""" % (filename, ",".join(reader.fieldnames), ",".join(self.fieldnames)))
+            else:
+                self._parse_file(filename, reader)
+
+
+    def _parse_file(self, filename, reader):
+        self.dups = dict() # Duplicate counters must be reset for each file
+        print("Lecture du fichier %s" % os.path.basename(filename))
+        for row in reader:
+            opdate = datetime.strptime(row[self.date_fieldname], '%d/%m/%Y')
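+            # Indexing key: the ISO date prefix keeps keys sorted
+            # chronologically, and the md5 digest of the JSON-serialized
+            # row makes identical operations coming from overlapping
+            # exports collapse onto the same key.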
+            ophash = datetime.strftime(opdate, '%Y-%m-%d') + hashlib.md5(json.dumps(row).encode()).hexdigest()
+            # Special use case: one file contains multiple identical
+            # lines. A counter is then appended to the duplicate
+            # ophash; the original occurrence counts as #1, so the
+            # first duplicate gets the suffix "-2".
+            if ophash in self.lines:
+                print("*** Duplicate line found in {}: {}".format(filename, ';'.join(row.values())))
+                self.dups[ophash] = self.dups.get(ophash, 1) + 1
+                ophash = ophash + "-" + str(self.dups[ophash])
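+            # e.g. three identical rows dated 15/01/2020 are stored
+            # under "2020-01-15<md5>", "2020-01-15<md5>-2" and
+            # "2020-01-15<md5>-3".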
+            # print(" We have now :\n {}\n {}".format("\n ".join([h + " // " + "".join(v.values()) for h,v in self.lines.items() if h.startswith(ophash[0:10])]), ophash + " // " + "".join(row.values()))) # XXX DEBUG
+            self.lines[ophash] = {k: v for k, v in row.items() if k != ''}
+            # Adjust the known date range
+            if opdate < self.daterange[0]:
+                self.daterange[0] = opdate
+            if opdate > self.daterange[1]:
+                self.daterange[1] = opdate
+            # Prepare overlap detection
+            if ophash not in self.overlap_detector:
+                self.overlap_detector[ophash] = set()
+            self.overlap_detector[ophash].add(filename)
+            # Remember the first and last lines of each CSV file
+            if filename not in self.first_ops:
+                self.first_ops[filename] = ophash
+            self.last_ops[filename] = ophash
+        # CSV files are sometimes sorted by date ASC and sometimes by
+        # date DESC, so first_op and last_op may need to be swapped
+        # once the file has been fully read.
+        if filename in self.first_ops and (
+                int(self.first_ops[filename][0:10].replace('-', '')) >
+                int(self.last_ops[filename][0:10].replace('-', ''))):
+            self.first_ops[filename], self.last_ops[filename] = \
+                self.last_ops[filename], self.first_ops[filename]
+
+
+    def dump_full(self, output_filename):
+        with open(output_filename, 'w', newline='') as outfile:
+            writer = csv.DictWriter(outfile, self.fieldnames, delimiter=';')
+            writer.writeheader()
+            # Most recent operations first
+            for line in sorted(self.lines.items(), reverse=True):
+                writer.writerow(line[1])
+        print("Relevé intégral généré dans le fichier %s" % os.path.abspath(output_filename))
+
+
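+    # Monthly split: the month of each operation is derived from the
+    # "YYYY-MM" prefix of its key (e.g. "2020-01-15<md5>" -> 202001)
+    # and one "releve_<YYYYMM>.csv" file is written per full month.
+    # Months flagged by check_overlaps() get the extra suffix
+    # "_potentiellement_incomplet" in their filename.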
+    def dump_monthly_reports(self, outputdir):
+        def next_month(yyyymm):
+            return yyyymm + 1 if yyyymm % 100 < 12 else (yyyymm // 100 + 1) * 100 + 1
+        def prev_month(yyyymm):
+            return yyyymm - 1 if yyyymm % 100 > 1 else (yyyymm // 100 - 1) * 100 + 12
+        # The first and last months of the date range are likely to be
+        # partial, so they are excluded from the monthly reports. Note
+        # that naive +1/-1 arithmetic on YYYYMM integers would break
+        # across year boundaries (e.g. 201912 + 1 = 201913).
+        firstmonth = next_month(int('{:%Y%m}'.format(self.daterange[0])))
+        lastmonth = prev_month(int('{:%Y%m}'.format(self.daterange[1])))
+        if firstmonth > lastmonth:
+            print("Impossible de générer des relevés mensuels car la plage de dates traitée est trop petite.")
+            return
+        curmonth = firstmonth
+        def __openfile__(curmonth):
+            fname = "releve_{0}.csv".format(curmonth)
+            outfile = open(os.path.join(outputdir, fname), 'w', newline='')
+            writer = csv.DictWriter(outfile, self.fieldnames, delimiter=';')
+            writer.writeheader()
+            return outfile, writer
+        outfile, writer = __openfile__(curmonth)
+        for line in sorted(self.lines.items()):
+            month = int(line[0][0:4] + line[0][5:7])
+            if month < curmonth:
+                continue
+            if month > lastmonth:
+                break
+            if month > curmonth:
+                outfile.close()
+                curmonth = month
+                if month in self.badmonths:
+                    outfile, writer = __openfile__(str(curmonth) + "_potentiellement_incomplet")
+                else:
+                    outfile, writer = __openfile__(curmonth)
+            writer.writerow(line[1])
+        outfile.close()
+        print("Relevés mensuels générés dans le dossier %s" % os.path.abspath(outputdir))
+
+
+    def check_overlaps(self):
+        """
+        Helps find possibly missing operations when the exported CSV
+        files are not "contiguous".
+        """
+        self.badmonths = set()
+        print("\nRecherche de chevauchements, car les chevauchements de fichiers CSV c'est bien, ça confirme qu'il n'y a pas d'écritures manquantes...")
+        for filename, first_op in self.first_ops.items():
+            if first_op in self.overlap_detector:
+                otherfiles = list(self.overlap_detector[first_op])
+                otherfiles.remove(filename)
+                if len(otherfiles) > 0:
+                    # Eliminate files having the same first_op
+                    otherfiles[:] = [candidate for candidate in otherfiles if self.first_ops[candidate] != first_op]
+                if len(otherfiles) == 0 and first_op[0:10] != "{0:%Y-%m-%d}".format(self.daterange[0]):
+                    self.badmonths.add(int(first_op[0:7].replace('-', '')))
+                    print("Attention. Il y a peut-être des écritures manquantes avant le %s (fichier %s)." % (first_op[0:10], os.path.basename(filename)))
+
+        for filename, last_op in self.last_ops.items():
+            if last_op in self.overlap_detector:
+                otherfiles = list(self.overlap_detector[last_op])
+                otherfiles.remove(filename)
+                if len(otherfiles) > 0:
+                    # Eliminate files having the same last_op
+                    otherfiles[:] = [candidate for candidate in otherfiles if self.last_ops[candidate] != last_op]
+                if len(otherfiles) == 0 and last_op[0:10] != "{0:%Y-%m-%d}".format(self.daterange[1]):
+                    self.badmonths.add(int(last_op[0:7].replace('-', '')))
+                    print("Attention. Il y a peut-être des écritures manquantes après le %s (fichier %s)." % (last_op[0:10], os.path.basename(filename)))
+        print("")
+
+
+def start_cli(dirpath, outputdir):
+    # Read the CSV files found in the input directory
+    p = CsvStatementParser()
+    for f in sorted(os.listdir(dirpath)):
+        if f.lower().endswith('.csv'):
+            p.parse(os.path.join(dirpath, f))
+    if p.fieldnames is None:
+        print("Aucun fichier CSV exploitable dans %s" % os.path.abspath(dirpath))
+        return
+    print("Les écritures lues s'étalent entre le {0:%d %B %Y} et le {1:%d %B %Y}.".format(p.daterange[0], p.daterange[1]))
+
+    # Look for overlaps between the exported files
+    p.check_overlaps()
+
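+    # The full statement filename embeds the processed date range,
+    # e.g. "integral_2019-01-01__2019-12-31.csv", plus the suffix
+    # "_avec_des_trous" when check_overlaps() flagged possible gaps.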
+    # Generate one full statement plus the monthly statements
+    suffix = "_{0:%Y-%m-%d}__{1:%Y-%m-%d}".format(p.daterange[0], p.daterange[1])
+    if len(p.badmonths):
+        suffix += "_avec_des_trous"
+    p.dump_full(os.path.join(outputdir, "integral%s.csv" % suffix))
+    p.dump_monthly_reports(outputdir)
+
+
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        print("Erreur. Merci de préciser le chemin du dossier où se trouvent les fichiers CSV à analyser.")
+        print("Usage:")
+        print("  %s exports_csv/ csv_mensuels/" % sys.argv[0])
+        sys.exit(1)
+    inputdir = sys.argv[1]
+    if len(sys.argv) > 2:
+        outputdir = sys.argv[2]
+    else:
+        outputdir = os.path.join(inputdir, "outputdir")
+    # Create the output directory if needed
+    if not os.path.isdir(outputdir):
+        os.makedirs(outputdir)
+    start_cli(inputdir, outputdir)