Compare commits

6 Commits

SHA1         Message                                                                     Date
86afb277e5   Fixed work multi-processing & creates combined monthly                      3 years ago
9e2d960f7e   Passing None as report save path cancels excel doc creation (for some       3 years ago
40c2a8a0df   Has issues...                                                               3 years ago
5caaf3d7ac   version 4.0 staging | New header relational positioning class structure     3 years ago
a3905d118e   Re-added consolidated reports with Return reports as well.                  3 years ago
5067678a8c   Project restructure to facilitate future work                               3 years ago
25 changed files:

.gitignore                           10
IL Extract.spec                       8
compile_gui                           1
ile_installer.py                    118
report_config_termplate.toml          1
requirements.txt                    BIN
settings.json                         2
src/__init__.py                       0
src/assets/checkedCircle.svg          0
src/assets/copy.svg                   0
src/assets/excel.svg                  0
src/assets/extract.ico                0
src/assets/extract.svg                0
src/assets/fileSearch.svg             0
src/assets/folder.svg                 0
src/assets/maximize.svg               0
src/assets/process.svg                0
src/assets/settings.svg               0
src/back_reporting.py               242
src/datasets/dataset_template.json   12
src/extractors.py                   184
src/il_extract.py                    34
src/il_reports.py                    54
src/ui_ile_main_window.py             3
todo.md                              39

.gitignore (10 changes)

@@ -1,10 +1,14 @@
build/
venv/
dist/
inputFiles/
InputFiles/
__pycache__/
2023/
*.spec
*.log
*.xlsx
*.xlsx
*.txt
*.md
!todo.md
!requirements.txt

IL Extract.spec (8 changes)

@@ -5,10 +5,10 @@ block_cipher = None
a = Analysis(
    ['main.py'],
    pathex=[],
    ['src/il_extract.py'],
    pathex=['src'],
    binaries=[],
    datas=[('assets/extract.svg', '.'), ('assets/process.svg', '.'), ('assets/folder.svg', '.'), ('assets/copy.svg', '.'), ('settings.json', '.')],
    datas=[('src/assets/*', 'assets'), ('settings.json', '.')],
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
@@ -37,7 +37,7 @@ exe = EXE(
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    icon='assets\\extract.ico',
    icon='src/assets/extract.ico',
)
coll = COLLECT(
    exe,

compile_gui (1 change)

@@ -1 +0,0 @@
pyinstaller -w --add-data "assets/extract.svg;." --add-data "assets/process.svg;." --add-data "assets/folder.svg;." --add-data "assets/copy.svg;." --add-data "settings.json;." -i assets/extract.ico -n "IL Extract" main.py

ile_installer.py (118 changes)

@@ -1,118 +0,0 @@
from os import system, getlogin
import os
from sys import exit
from zipfile import ZipFile
import win32com.client
from glob import glob
import re
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep


def error_exit(exception_info: str):
    print(exception_info)
    input("\nPress enter/return to exit")
    exit(1)


class NoMatchingFile(Exception):
    def __init__(self, search_file: str, found: list) -> None:
        super().__init__(f"File: {search_file} was not found: {found}")


class Loader:
    def __init__(self, desc="Loading...", end="Done!", timeout=0.1):
        """
        A loader-like context manager

        Args:
            desc (str, optional): The loader's description. Defaults to "Loading...".
            end (str, optional): Final print. Defaults to "Done!".
            timeout (float, optional): Sleep time between prints. Defaults to 0.1.
        """
        self.desc = desc
        self.end = end
        self.timeout = timeout
        self._thread = Thread(target=self._animate, daemon=True)
        self.steps = ["|", "/", "-", "\\"]
        self.done = False

    def start(self):
        self._thread.start()
        return self

    def _animate(self):
        # Cycle the spinner characters until stop() flips self.done
        for c in cycle(self.steps):
            if self.done:
                break
            print(f"\r{self.desc} {c}", flush=True, end="")
            sleep(self.timeout)

    def __enter__(self):
        self.start()

    def stop(self):
        self.done = True
        cols = get_terminal_size((80, 20)).columns
        print("\r" + " " * cols, end="", flush=True)
        print(f"\r{self.end}", flush=True)

    def __exit__(self, exc_type, exc_value, tb):
        # handle exceptions with those variables ^
        self.stop()


ZIP_LOCATION = r"\\leafnow.com\public\Accounting Shared\ILE Apps"
APP_FOLDER = r"InfoLeaseExtract"

try:
    user = getlogin()
    install_folder = f"C:\\Users\\{user}\\AppData\\Local"
    backup_install_folder = f"C:\\Users\\{user}\\Documents\\"
    print("Initializing InfoLease Extract Installer\n#######################################")
    # Find the newest version:
    latest_version = glob(f"{ZIP_LOCATION}\\LATEST*")
    if len(latest_version) == 0:
        raise NoMatchingFile(f"{ZIP_LOCATION}\\LATEST*", latest_version)
    latest_version: str = latest_version[0]
    version = re.search(r"\d+\.\d+", latest_version).group()
    print(f"Installing version {version}...")
    with ZipFile(latest_version, 'r') as zipObj:
        try:
            with Loader("Setting up program files..."):
                zipObj.extractall(install_folder)
        except Exception as e:
            error_exit(f"Failed to extract file ({latest_version}) to '{install_folder}' :\n{e}")
    print("Creating Desktop shortcut...")
    try:
        desktop = f"C:\\Users\\{user}\\OneDrive - LEAF Commercial Capital\\Desktop"
        shell = win32com.client.Dispatch("WScript.Shell")
        shortcut = shell.CreateShortCut(os.path.join(desktop, "IL Extract v3.10.lnk"))
        shortcut.Targetpath = f"{install_folder}\\IL Extract\\IL Extract.exe"
        shortcut.IconLocation = f"{install_folder}\\IL Extract\\assets\\extract.ico"
        shortcut.WorkingDirectory = f"{install_folder}\\IL Extract"
        shortcut.save()
    except Exception:
        # Fall back to the local Desktop if the OneDrive desktop path is unavailable
        try:
            desktop = f"C:\\Users\\{user}\\Desktop"
            shell = win32com.client.Dispatch("WScript.Shell")
            shortcut = shell.CreateShortCut(os.path.join(desktop, "IL Extract v3.10.lnk"))
            shortcut.Targetpath = f"{install_folder}\\IL Extract\\IL Extract.exe"
            shortcut.IconLocation = f"{install_folder}\\IL Extract\\assets\\extract.ico"
            shortcut.WorkingDirectory = f"{install_folder}\\IL Extract"
            shortcut.save()
        except Exception as e:
            error_exit(f"Failed to create shortcut. The application is still installed at:\n{install_folder}\\IL Extract.\nYou can manually create a shortcut if you would like.\n{e}")
    print("\nInstallation Completed Successfully!")
    input("\nPress Enter/Return to exit.")
except Exception as e:
    error_exit(f"High level exception:\n{e}")
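
A minimal usage sketch of the Loader spinner above (hypothetical; assumes the Loader class is in scope, since this commit deletes ile_installer.py):

from time import sleep

with Loader("Unpacking archive...", end="Archive unpacked!"):
    sleep(2)  # stand-in for the real work, e.g. zipObj.extractall()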

report_config_termplate.toml (1 change)

@@ -0,0 +1 @@
name = "Test Name"

requirements.txt

Binary file not shown.

settings.json (2 changes)

@@ -1 +1 @@
{"debug": false, "consolidatedBasePath": "leafnow.com/shared/cashapps", "defaultLocations": {"ach": "", "disp": "", "gl": "", "lb": "", "minv": "", "niv": "", "ren": "", "pymt": "", "uap": "", "pastdue": ""}}
{"debug": true, "consolidatedBasePath": ".", "defaultLocations": {"ach": "Z:/Business Solutions/Griff/Code/InfoLeaseExtract/InputFiles", "disp": "", "gl": "", "lb": "Z:/Business Solutions/Griff/Code/InfoLeaseExtract/InputFiles", "minv": "", "niv": "", "ren": "", "pymt": "Z:/Business Solutions/Griff/Code/InfoLeaseExtract/InputFiles", "uap": "", "pastdue": ""}}

src/assets/*: 10 image files (the SVG icons and extract.ico) moved under src/assets/ with no content changes (image diffs not shown).

src/back_reporting.py (242 changes)

@@ -0,0 +1,242 @@
from pathlib import Path
import re
from re import Pattern
import pandas as pd
from pandas import DataFrame, ExcelWriter, read_excel
from datetime import datetime as dt, timedelta
import logging
import il_reports as ilr
from dataclasses import dataclass
from typing import Callable
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
import os

TOP_PATH: Path = Path(r"\\leafnow.com\shared\Accounting\CASH APPS\2023")


class LevelFilter(object):
    def __init__(self, level):
        self.__level = level

    def filter(self, logRecord):
        # Only pass records at exactly this level (used to split the log files)
        return logRecord.levelno == self.__level


def create_logger(logger_name: str = __name__):
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)  # without this the handlers below never see DEBUG/INFO records
    log_folder = Path(r"\\leafnow.com\shared\Business Solutions\Griff\Code\InfoLeaseExtract\logs")
    fail_handler = logging.FileHandler(Path(log_folder, "Fail_br.log"), 'w')
    fail_handler.setLevel(logging.WARNING)
    info_handler = logging.FileHandler(Path(log_folder, "Info_br.log"), 'w')
    info_handler.setLevel(logging.INFO)
    info_handler.addFilter(LevelFilter(logging.INFO))
    debug_handler = logging.FileHandler(Path(log_folder, "Debug_br.log"), 'w')
    debug_handler.setLevel(logging.DEBUG)
    debug_handler.addFilter(LevelFilter(logging.DEBUG))
    s_handler = logging.StreamHandler()
    s_handler.setLevel(logging.INFO)
    logger.addHandler(fail_handler)
    logger.addHandler(info_handler)
    logger.addHandler(debug_handler)
    logger.addHandler(s_handler)
    return logger


logger = create_logger()


@dataclass
class ExtractInstruction:
    input_regex: str  # filename fragment used in a glob, not a compiled regex
    sheet_name: str
    extract_method: Callable


@dataclass
class ReportFolder:
    folder_name: Path
    extraction_methods: list[ExtractInstruction]


def extract_date_path(path: Path) -> Path | None:
    """Walk up the parents of `path` and return the YYYY.MM directory, if any."""
    date_pattern = re.compile(r'^\d{4}\.\d{2}$')
    for parent in path.parents:
        if date_pattern.match(parent.name):
            return parent
    return None


def append_to_consolidated_report(report_path: Path, report_df: DataFrame, sheet_name: str):
    """Create or append to this month's consolidated report for `sheet_name`."""
    report_month: Path = extract_date_path(report_path)
    report_name: str = f"{str(report_month.name).replace('.', '-')}_{sheet_name}_ConsolidatedReport.xlsx"
    save_path = Path(r"\\leafnow.com\shared\Business Solutions\Griff\Code\InfoLeaseExtract\2023", report_name)
    logger.debug(f"{save_path=}")
    # Check if the current month has a consolidated report
    if not save_path.exists():
        logger.debug(f"Consolidated Report | No monthly summary file!\n\tCreating: {save_path}")
        # No file exists yet
        # Create it and add the current month
        try:
            with pd.ExcelWriter(save_path) as writer:
                logger.debug(f"Consolidated Report | {sheet_name}: Saving data as: {report_name}")
                report_df.to_excel(writer, index=False, sheet_name=sheet_name)
        except Exception as e:
            logger.error(f"Failed to create consolidated report! {report_name} | {sheet_name} | {report_path} :\n{e}")
    else:
        # We need to read the dataframe in the current monthly report
        # Check that we are not adding matching data
        # Save the new report
        # FIXME: This is so hacky it's embarrassing
        try:
            current_data_len = len(pd.read_excel(save_path, sheet_name=sheet_name))
            with pd.ExcelWriter(save_path, engine='openpyxl', mode='a', if_sheet_exists="overlay") as writer:
                logger.debug(f"Consolidated Report | {sheet_name}: Saving data as: {report_name}")
                report_df.to_excel(writer, index=False, sheet_name=sheet_name, startrow=current_data_len, header=False)
        except Exception as e:
            logger.error(f"Failed to append to consolidated report! {report_name} | {sheet_name} | {report_path} :\n{e}")


def process_report(file: Path, extract_inst: ExtractInstruction) -> bool:
    try:
        with open(str(file), errors="replace") as f:
            report_str: str = f.read()
            # logger.debug(f"{report_str}")
            try:
                df: DataFrame = extract_inst.extract_method(report_str, None)
                if df.empty:
                    raise ValueError("Dataframe is empty!")
            except Exception as e:
                logger.warning(f"Failed to create report df: {extract_inst.sheet_name}:\n{e}")
                return False
            append_to_consolidated_report(file, df, extract_inst.sheet_name)
            return True
    except Exception as e:
        logger.exception(f"could not process {file}:\n{e}")
        return False


def process_folder(folder: ReportFolder):
    # Search recursively through date directories
    report_date: dt = dt(2023, 5, 1)
    while report_date.date() < dt.now().date():
        logger.info(f"{folder.folder_name} | Processing date: {report_date}")
        report_folder: Path = Path(TOP_PATH,
                                   report_date.strftime("%Y.%m"),
                                   report_date.strftime("%Y.%m.%d"),
                                   folder.folder_name
                                   )
        logger.debug(f"report_folder: {report_folder}")
        if report_folder.exists():
            for xi in folder.extraction_methods:
                try:
                    files = report_folder.glob(f"*{xi.input_regex}*")
                    report_file: Path = next(files)
                    logger.debug(f"Report file: {report_file}")
                except StopIteration:
                    # next() on an exhausted glob raises StopIteration, not IndexError
                    logger.warning(f"No matching reports!: *{xi.input_regex}*")
                    continue
                except Exception as e:
                    logger.debug(f"Could not get report_file: {report_folder.glob(f'*{xi.input_regex}*')} \n{e}")
                    continue
                try:
                    success = process_report(report_file, xi)
                    if success:
                        logger.info(f"Report Processed: {report_file} | {xi.sheet_name}")
                    else:
                        logger.warning(f"Failed to process report: {report_file} | {xi.sheet_name}")
                except Exception as e:
                    logger.exception(f"Could not process report ({report_file}) :\n{e}")
                    continue
        else:
            logger.debug(f"Folder '{report_folder}' does not exist!")
        report_date = report_date + timedelta(days=1)
    logger.debug(f"Finished scanning {folder.folder_name}!")


def combine():
    WORK_DIR = Path(r"\\leafnow.com\shared\Business Solutions\Griff\Code\InfoLeaseExtract\2023")
    REPORTS = [
        "ACH",
        "CHECKS LIVE",
        "CREDIT CARDS",
        "PAY BY PHONE",
        "WIRE",
        "RETURNS ACH",
        "RETURNS PORTAL"
    ]
    for i in range(1, 6):
        month = f"2023-0{i}"
        mcr: Path = Path(f"{month} Consolidated Report.xlsx")
        print(f"Creating monthly consolidated report: {mcr}")
        with ExcelWriter(Path(WORK_DIR, "Monthly", mcr), engine="xlsxwriter") as wrtr:
            for r in REPORTS:
                report_path: Path = Path(WORK_DIR, f"{month}_{r}_ConsolidatedReport.xlsx")
                print(f"Report Path ({r}): {report_path}")
                rdf: DataFrame = read_excel(report_path, sheet_name=r)
                rdf.to_excel(wrtr, sheet_name=r, freeze_panes=(1, 0), index=False)


if __name__ == "__main__":
    FOLDERS = [
        ReportFolder("ACH", [
            ExtractInstruction("_ACH_", "ACH", ilr.ach),
        ]),
        ReportFolder("CHECKS LIVE", [
            ExtractInstruction("_PROGPAY_BER", "CHECKS LIVE", ilr.payment_transactions)
        ]),
        ReportFolder("CREDIT CARDS", [
            ExtractInstruction("_VMCC_BER", "CREDIT CARDS", ilr.payment_transactions)
        ]),
        ReportFolder("LOCKBOX", [
            # NOTE: "\d+" is regex syntax and will not match as a glob fragment
            ExtractInstruction(r"_LOCKBOX_\d+_", "LOCKBOX", ilr.lockbox)
        ]),
        ReportFolder("PAY BY PHONE", [
            ExtractInstruction("_PBP_EPAY_DPS_BER", "PAY BY PHONE", ilr.payment_transactions)
        ]),
        ReportFolder("RETURN REPORTING", [
            ExtractInstruction("_PBP_EPAY_RETURNS_BER", "RETURNS ACH", ilr.payment_transactions),
            ExtractInstruction("_RETURNS_BER", "RETURNS PORTAL", ilr.payment_transactions)
        ]),
        ReportFolder("WIRES", [
            ExtractInstruction("MTBWIRE_BER", "WIRE", ilr.payment_transactions)
        ]),
    ]
    process_folder(FOLDERS[0])
    # with Pool(cpu_count()) as pool:
    #     for folder in tqdm(pool.imap_unordered(process_folder, FOLDERS)):
    #         try:
    #             print(f"Completed!")
    #         except Exception as e:
    #             print(f"Failed to process\n {e}")
    #             continue
    # for folder in tqdm(FOLDERS):
    #     try:
    #         process_folder(folder)
    #         print(f"Completed: {folder.folder_name}")
    #     except Exception as e:
    #         print(f"Failed to process {folder.folder_name} \n {e}")
    #         continue
    # input("Complete!")
    combine()
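
The commented-out block above is circling a pooled run; a minimal sketch of what that could look like (hypothetical; run_all is not part of the commit, and it assumes process_folder and FOLDERS as defined in this file):

def run_all(folders: list[ReportFolder]) -> None:
    with Pool(min(cpu_count(), len(folders))) as pool:
        # imap_unordered yields as each worker finishes, so tqdm advances live
        for _ in tqdm(pool.imap_unordered(process_folder, folders), total=len(folders)):
            pass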

src/datasets/dataset_template.json (12 changes)

@@ -0,0 +1,12 @@
{
    "name": {
        "report": "",
        "excel": ""
    },
    "relative_position": {
        "rows": 0,
        "col": 0
    },
    "length": 0,
    "data_type": "int"
}
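
A sketch of how this template could be loaded and handed to the DataSet class in src/extractors.py (hypothetical glue code; it assumes DataSet reads the top-level "name" key and maps the "data_type" string onto a Python type, as in the corrected class below):

import json
from pathlib import Path

with open(Path("src/datasets/dataset_template.json")) as f:
    config = json.load(f)
dataset = DataSet(config)  # DataSet is defined in src/extractors.py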

src/extractors.py (184 changes)

@@ -0,0 +1,184 @@
from typing import TypeAlias, TypeVar
from dataclasses import dataclass
from pathlib import Path
import pathlib as pl
from abc import ABC, abstractmethod, abstractproperty
from re import search, match, compile, Match, Pattern
from enum import Enum, auto

ColumnIndex: TypeAlias = int
Money: TypeAlias = float
Numeric = TypeVar("Numeric", float, int)


class Line(Enum):
    # auto() values so the members actually exist; bare annotations would
    # leave the Enum empty and references like Line.Data would fail
    Header = auto()
    Data = auto()
    Erroneous = auto()
    Top = auto()
    Bottom = auto()


@dataclass
class RelativePosition:
    """
    Coordinates for navigating from one point in a row to another
    """
    rows: int
    col: ColumnIndex


@dataclass
class DataValue:
    position: RelativePosition
    length: int
    dtype: type
    regex: Pattern | None = None  # optional so DataSet can build one without a pattern

    def correct_line(self, adj_lines_since_header: int) -> bool:
        """Whether this offset from the header line is one this value lives on."""
        return adj_lines_since_header % self.position.rows == 0

    def _line_slice(self, line: str) -> str | None:
        """
        Attempts to get the data from the line.
        Returns the string at the correct position or None if out of range.
        """
        start: int = self.position.col
        end: int = start + self.length
        if start >= len(line):
            # Slicing never raises IndexError, so check the range explicitly
            return None
        return line[start:end]

    @staticmethod
    def _to_float(number_str: str) -> float | None:
        try:
            return float(number_str.replace(',', ''))
        except ValueError:
            return None

    def extract(self, line: str) -> str | float | None:
        """Pull this value out of a data line, converting numerics to float."""
        line_slice: str | None = self._line_slice(line)
        if line_slice is None:
            return None
        if self.regex is None:
            value_str = line_slice
        else:
            value_match: Match | None = search(self.regex, line_slice)
            if value_match is None:
                return None
            value_str = value_match.group()
        value_str = value_str.strip()
        if self.dtype == int or self.dtype == float:
            return self._to_float(value_str)
        # TODO datetime
        return value_str


class DataSet:
    # Maps the template's "data_type" strings onto Python types
    DTYPES = {"int": int, "float": float, "str": str}

    def __init__(self, config: dict) -> None:
        self.r_name = config["name"]["report"]  # keys follow dataset_template.json
        try:
            self.e_name = config["name"]["excel"]
        except KeyError:
            self.e_name = self.r_name
        self.data_value: DataValue = DataValue(
            position=RelativePosition(
                rows=config["relative_position"]["rows"],
                col=config["relative_position"]["col"]
            ),
            length=config["length"],
            dtype=self.DTYPES[config["data_type"]],
        )

    def line_position(self, line: str) -> ColumnIndex | None:
        """
        Searches a line for the report header for this dataset.

        Returns:
            ColumnIndex(int) | None: the column index of the match's end position,
            or None if no match was found
        """
        header_match: Match | None = search(self.r_name, line)
        return header_match.end() if isinstance(header_match, Match) else None


@dataclass
class ReportConfig:
    file_extension: str
    name: str
    datasets: list[DataSet]
    data_line_regexes: list[Pattern]


class ILReport(ABC):
    def __init__(self, file_path: Path, report_config: ReportConfig) -> None:
        self.in_file_path: Path = file_path
        self.line_gen = self._line_generator(file_path)
        self.config: ReportConfig = report_config
        self.name = report_config.name
        self.line_type_history: list[Line] = []
        self.last_header_line: int | None = None
        self.data_dict: dict = {
            header.e_name: []
            for header in self.config.datasets
        }

    @staticmethod
    def _line_generator(file_path: Path):
        with open(file_path, 'r') as in_file:
            for line in in_file:
                yield line

    def _add_line_history(self, line: Line, max_history: int = 10):
        self.line_type_history.append(line)
        while len(self.line_type_history) > max_history:
            self.line_type_history.pop(0)

    def _is_header_line(self, line: str) -> bool:
        """
        Checks whether a report line has data headers.
        """
        regex: Pattern
        for regex in self.config.data_line_regexes:
            if isinstance(search(regex, line), Match):
                return True
        return False

    @abstractmethod
    def _skip_line(self, line) -> bool:
        """
        Tells whether we should skip this line
        """

    @abstractmethod
    def _process_line(self):
        """Handle the next line from the report."""

    @abstractmethod
    def _process_dataline(self, dataline: str):
        """Extract every dataset's values from a data line."""
        # Search the row for a data set name, or list of data set names
        # extract all the data until the next row


if __name__ == "__main__":
    datasets = []
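
A worked example of the fixed-width extraction DataValue implements (hypothetical values): position.col picks the character slice, the optional regex pulls the value out of that slice, and numeric dtypes are routed through _to_float:

amount = DataValue(
    position=RelativePosition(rows=1, col=20),
    length=12,
    dtype=float,
    regex=compile(r"[\d,]+\.\d{2}"),
)
line = "INV 000123          1,234.56     PAID"
print(amount.extract(line))  # -> 1234.56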

src/il_extract.py (34 changes)

@@ -1,20 +1,20 @@
from ILE_MainWindow import Ui_MainWindow
from ui_ile_main_window import Ui_MainWindow
import sys
import os
import pandas as pd
import json
from PyQt5 import QtWidgets
from datetime import datetime as dt
import ILExtract as ilx
import il_reports as ilx  # TODO redo aliasing

from logging import debug, DEBUG, basicConfig

with open("settings.json") as s:
    settings = json.loads(s.read())

if settings["debug"]:
    basicConfig(filename='debug.log', encoding='utf-8', level=DEBUG)
# if settings["debug"]:
basicConfig(filename='debug.log', filemode='w', encoding='utf-8', level=DEBUG)

debug("\n\n\n########################### VERSION = 3.10 ###########################\n\n\n")
debug("\n\n\n########################### VERSION = 3.2 ###########################\n\n\n")
debug("Running main.py...")

class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """

@@ -290,14 +290,16 @@ class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
        debug(f"report_type_change | inputFile: {self.inputFile}")
        debug(f"report_type_change | outputFile: {self.outputFile}")
        self.check_ready_to_process()

# Defines the app
app = QtWidgets.QApplication(sys.argv)
# Sets the style
app.setStyle("Fusion")
# Builds the main window
window = MainWindow()
window.setWindowTitle("IL Extract")
window.show()
# Starts the app
app.exec()

if __name__ == "__main__":
    # Defines the app
    app = QtWidgets.QApplication(sys.argv)
    # Sets the style
    app.setStyle("Fusion")
    # Builds the main window
    window = MainWindow()
    window.setWindowTitle("IL Extract")
    window.show()
    # Starts the app
    app.exec()

src/il_reports.py (54 changes)

@@ -8,7 +8,7 @@ import numpy as np
from glob import glob
from logging import debug, DEBUG, basicConfig, warn, error

# V3.1 | 01/19/23
# V3.2 | 04/21/23

with open("settings.json") as s:
    settings = json.loads(s.read())

@@ -70,10 +70,14 @@ class ILReport:
            sheet_name = "CREDIT CARDS"
        elif re.search("(?i)lockbox", self.location) != None:
            sheet_name = "LOCKBOX"
        elif re.search("(?i)PBP_EPAY_RETURNS_BER", self.location) != None:
            sheet_name = "RETURNS ACH"
        elif re.search("(?i)epay", self.location) != None:
            sheet_name = "PAY BY PHONE"
        elif re.search("(?i)wires", self.location) != None:
            sheet_name = "WIRES"
        elif re.search("(?i)RETURNS_BER", self.location) != None:
            sheet_name = "RETURNS Portal"
        else:
            return None

@@ -102,18 +106,25 @@ class ILReport:
        # We need to read the dataframe in the current monthly report
        # Check that we are not adding matching data
        # Save the new report
        current_data: DataFrame = pd.read_excel(month_summary_file[0], sheet_name=sheet_name)
        new_data_len = len(dataframe_to_append)
        cur_first_col = current_data.iloc[len(current_data)-new_data_len:, 0].to_list()
        new_first_col = dataframe_to_append.iloc[:, 0].to_list()
        if cur_first_col == new_first_col:
            debug(f"Consolidated Report | Data is same as previous! Skipping!")
            return None
        # FIXME: This is so hacky it's embarrassing
        add_headers = False
        try:
            current_data: DataFrame = pd.read_excel(month_summary_file[0], sheet_name=sheet_name)
            new_data_len = len(dataframe_to_append)
            cur_first_col = current_data.iloc[len(current_data)-new_data_len:, 0].to_list()
            new_first_col = dataframe_to_append.iloc[:, 0].to_list()
            if cur_first_col == new_first_col:
                debug(f"Consolidated Report | Data is same as previous! Skipping!")
                return None
        except ValueError:
            # pandas raises ValueError("Worksheet named '<sheet_name>' not found")
            # when the sheet doesn't exist yet, so start fresh and write headers
            current_data = []
            add_headers = True
        # We need to find the start cols (where the new data should go)
        try:
            with pd.ExcelWriter(save_path, engine='openpyxl', mode='a', if_sheet_exists="overlay") as writer:
                debug(f"Consolidated Report | {sheet_name}: Saving data as: {report_name}")
                dataframe_to_append.to_excel(writer, index=False, sheet_name=sheet_name, startrow=len(current_data), header=False)
                dataframe_to_append.to_excel(writer, index=False, sheet_name=sheet_name, startrow=len(current_data), header=add_headers)
        except Exception as e:
            error(f"[E] Failed to append to consolidated report! {sheet_name}:\n{e}")

@@ -181,7 +192,7 @@ COMMON REGEX COMPONENTS
"""

def ach(report: str, save_name: str):
def ach(report: str, save_name: str | None):
    debug(f"ACH Report {save_name} :\n{report}")
    lines = report.splitlines()
    extracted_data_dict = {

@@ -235,11 +246,12 @@ def ach(report: str, save_name: str):
    dataframe: DataFrame = DataFrame(extracted_data_dict)
    # We're creating two sheets (data & summary), so we need to open an Excel writer
    # This also helps with a bug caused by larger dataframes
    with pd.ExcelWriter(save_name) as writer:
        debug(f"ACH: Saving data as: {save_name}")
        dataframe.to_excel(writer, index=False, sheet_name="data")
        # The batches dictionary is converted to a dataframe and added as its own sheet
        DataFrame(batches).to_excel(writer, index=False, sheet_name="Summary")
    if save_name is not None:
        with pd.ExcelWriter(save_name) as writer:
            debug(f"ACH: Saving data as: {save_name}")
            dataframe.to_excel(writer, index=False, sheet_name="data")
            # The batches dictionary is converted to a dataframe and added as its own sheet
            DataFrame(batches).to_excel(writer, index=False, sheet_name="Summary")
    return dataframe

def disposition(report: str, save_name: str):

@@ -614,7 +626,7 @@ def net_invest_trial_balance(report: str, save_name: str):
        writer, index=True, sheet_name="Summary")
    return dataframe

def lockbox(report: str, save_name: str):
def lockbox(report: str, save_name: str | None):
    debug(f"LockBox Report {save_name}:\n{report}")
    lines = report.splitlines()
    extracted_data_dict = {

@@ -667,7 +679,8 @@ def lockbox(report: str, save_name: str):
            extracted_data_dict["CUST NAME"].append(lines[index+1].strip())
    dataframe = DataFrame(extracted_data_dict)
    debug(f"LockBox | Saving dataframe: {save_name}")
    dataframe.to_excel(save_name, index=False)
    if save_name is not None:
        dataframe.to_excel(save_name, index=False)
    return dataframe

@@ -706,7 +719,7 @@ def minv(report: str, save_name: str):

# Good for PUB_WIRES, VMCC, PBP_EPAY, returned check
def payment_transactions(report: str, save_name: str):
def payment_transactions(report: str, save_name: str | None):
    debug(f"PayTrans | {save_name}:\n{report}")
    lines = report.splitlines()
    data_extractor = create_line_divider([6, 33, 52, 62, 80, 89, 110, 121])

@@ -743,8 +756,9 @@ def payment_transactions(report: str, save_name: str):
        extracted_data_dict['INV NO'].append(inv_no)
    dataframe = DataFrame(extracted_data_dict)
    debug(f"PayTrans | Completed Dataframe:\n{dataframe}")
    dataframe.to_excel(save_name, index=False)
    debug(f"PayTrans | Saved to {save_name}")
    if save_name is not None:
        dataframe.to_excel(save_name, index=False)
        debug(f"PayTrans | Saved to {save_name}")
    return dataframe
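
This is the change the "Passing None as report save path cancels excel doc creation" commit refers to: with save_name=None each parser still returns its DataFrame but skips the Excel write. A minimal usage sketch (hypothetical file path):

with open("path/to/SOME_ACH_REPORT.txt", errors="replace") as f:
    df = ach(f.read(), None)  # DataFrame comes back; no .xlsx is written
print(df.head())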

src/ui_ile_main_window.py (3 changes)

@@ -1,3 +1,6 @@
"""
The user-interface setup for the application's main window
"""
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'ILE_MainWindow.ui'

todo.md (39 changes)

@@ -0,0 +1,39 @@
# Work List

## Priority

- [ ] Bring back in consolidated reports
    - [X] ACH
    - [X] CHECKS_LIVE
    - [X] CREDIT
    - [X] LOCKBOX
    - [X] PAY BY PHONE
    - [X] WIRES
    - [ ] RETURNS ACH
    - [ ] RETURNS Portal *(new addition)*
- [ ] Adjust pyinstaller spec for new file structure
- [ ] Function to recap year
- [ ] Fix Logging

## Feature Goals

- [ ] Year Walkthrough report
- [ ] 'In Progress' notification/spinner
- [ ] Speed up ACH/All
- [ ] Generate monthly consolidated reports for each month in a year
    - Must generate IL Extract report where necessary
- [ ] Users can add/create new reports
    - This would be very complicated

## Code Improvement

- [ ] Rework IL Report as an ABC and report types as sub-classes
- [ ] Rework config setup
- [ ] Simplify & standardize row/data parsing

## Completed last commit

---