services api
This commit is contained in:
93
trash/XBuildFromIban/.dockerignore
Normal file
93
trash/XBuildFromIban/.dockerignore
Normal file
@@ -0,0 +1,93 @@
|
||||
# Git
|
||||
.git
|
||||
.gitignore
|
||||
.gitattributes
|
||||
|
||||
|
||||
# CI
|
||||
.codeclimate.yml
|
||||
.travis.yml
|
||||
.taskcluster.yml
|
||||
|
||||
# Docker
|
||||
docker-compose.yml
|
||||
service_app/Dockerfile
|
||||
.docker
|
||||
.dockerignore
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
**/__pycache__/
|
||||
**/*.py[cod]
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
service_app/env/
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.coverage
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Virtual environment
|
||||
service_app/.env
|
||||
.venv/
|
||||
venv/
|
||||
|
||||
# PyCharm
|
||||
.idea
|
||||
|
||||
# Python mode for VIM
|
||||
.ropeproject
|
||||
**/.ropeproject
|
||||
|
||||
# Vim swap files
|
||||
**/*.swp
|
||||
|
||||
# VS Code
|
||||
.vscode/
|
||||
|
||||
test_application/
|
||||
|
||||
|
||||
33
trash/XBuildFromIban/Dockerfile
Normal file
33
trash/XBuildFromIban/Dockerfile
Normal file
@@ -0,0 +1,33 @@
|
||||
FROM python:3.12-slim
|
||||
|
||||
WORKDIR /
|
||||
|
||||
# Set Python path to include app directory
|
||||
ENV PYTHONPATH=/ PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
# Install system dependencies and Poetry
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends gcc && rm -rf /var/lib/apt/lists/* && pip install --no-cache-dir poetry
|
||||
|
||||
# Copy Poetry configuration
|
||||
COPY /pyproject.toml ./pyproject.toml
|
||||
|
||||
# Configure Poetry and install dependencies with optimizations
|
||||
RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi --no-root --only main && pip cache purge && rm -rf ~/.cache/pypoetry
|
||||
|
||||
# Install cron for scheduling tasks
|
||||
RUN apt-get update && apt-get install -y cron
|
||||
|
||||
# Copy application code
|
||||
COPY /ServicesBank/Finder/BuildFromIban /
|
||||
COPY /ServicesApi/Schemas /Schemas
|
||||
COPY /ServicesApi/Controllers /Controllers
|
||||
|
||||
# Create log file to grab cron logs
|
||||
RUN touch /var/log/cron.log
|
||||
|
||||
# Make entrypoint script executable
|
||||
RUN chmod +x /entrypoint.sh
|
||||
RUN chmod +x /run_app.sh
|
||||
|
||||
# Use entrypoint script to update run_app.sh with environment variables and start cron
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
3
trash/XBuildFromIban/README.md
Normal file
3
trash/XBuildFromIban/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Docs of Finder
|
||||
|
||||
Finds people, living spaces, and companies from AccountRecords.
|
||||
30
trash/XBuildFromIban/entrypoint.sh
Normal file
30
trash/XBuildFromIban/entrypoint.sh
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Create environment file that will be available to cron jobs
|
||||
echo "POSTGRES_USER=\"$POSTGRES_USER\"" >> /env.sh
|
||||
echo "POSTGRES_PASSWORD=\"$POSTGRES_PASSWORD\"" >> /env.sh
|
||||
echo "POSTGRES_DB=\"$POSTGRES_DB\"" >> /env.sh
|
||||
echo "POSTGRES_HOST=\"$POSTGRES_HOST\"" >> /env.sh
|
||||
echo "POSTGRES_PORT=$POSTGRES_PORT" >> /env.sh
|
||||
echo "POSTGRES_ENGINE=\"$POSTGRES_ENGINE\"" >> /env.sh
|
||||
echo "POSTGRES_POOL_PRE_PING=\"$POSTGRES_POOL_PRE_PING\"" >> /env.sh
|
||||
echo "POSTGRES_POOL_SIZE=$POSTGRES_POOL_SIZE" >> /env.sh
|
||||
echo "POSTGRES_MAX_OVERFLOW=$POSTGRES_MAX_OVERFLOW" >> /env.sh
|
||||
echo "POSTGRES_POOL_RECYCLE=$POSTGRES_POOL_RECYCLE" >> /env.sh
|
||||
echo "POSTGRES_POOL_TIMEOUT=$POSTGRES_POOL_TIMEOUT" >> /env.sh
|
||||
echo "POSTGRES_ECHO=\"$POSTGRES_ECHO\"" >> /env.sh
|
||||
|
||||
# Add Python environment variables
|
||||
echo "PYTHONPATH=/" >> /env.sh
|
||||
echo "PYTHONUNBUFFERED=1" >> /env.sh
|
||||
echo "PYTHONDONTWRITEBYTECODE=1" >> /env.sh
|
||||
|
||||
# Make the environment file available to cron
|
||||
echo "*/5 * * * * /run_app.sh >> /var/log/cron.log 2>&1" > /tmp/crontab_list
|
||||
crontab /tmp/crontab_list
|
||||
|
||||
# Start cron
|
||||
cron
|
||||
|
||||
# Tail the log file
|
||||
tail -f /var/log/cron.log
|
||||
26
trash/XBuildFromIban/run_app.sh
Normal file
26
trash/XBuildFromIban/run_app.sh
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Source the environment file directly
|
||||
. /env.sh
|
||||
|
||||
# Re-export all variables to ensure they're available to the Python script
|
||||
export POSTGRES_USER
|
||||
export POSTGRES_PASSWORD
|
||||
export POSTGRES_DB
|
||||
export POSTGRES_HOST
|
||||
export POSTGRES_PORT
|
||||
export POSTGRES_ENGINE
|
||||
export POSTGRES_POOL_PRE_PING
|
||||
export POSTGRES_POOL_SIZE
|
||||
export POSTGRES_MAX_OVERFLOW
|
||||
export POSTGRES_POOL_RECYCLE
|
||||
export POSTGRES_POOL_TIMEOUT
|
||||
export POSTGRES_ECHO
|
||||
|
||||
# Python environment variables
|
||||
export PYTHONPATH
|
||||
export PYTHONUNBUFFERED
|
||||
export PYTHONDONTWRITEBYTECODE
|
||||
|
||||
# env >> /var/log/cron.log
|
||||
/usr/local/bin/python /runner.py
|
||||
26
trash/XBuildFromIban/runner.py
Normal file
26
trash/XBuildFromIban/runner.py
Normal file
@@ -0,0 +1,26 @@
|
||||
import arrow
|
||||
|
||||
from Schemas import AccountRecords, BuildIbans
|
||||
|
||||
|
||||
def account_find_build_from_iban(session):
|
||||
AccountRecords.set_session(session)
|
||||
BuildIbans.set_session(session)
|
||||
|
||||
account_records_ibans = AccountRecords.query.filter(AccountRecords.build_id == None, AccountRecords.approved_record == False).distinct(AccountRecords.iban).all()
|
||||
for account_records_iban in account_records_ibans:
|
||||
found_iban: BuildIbans = BuildIbans.query.filter(BuildIbans.iban == account_records_iban.iban).first()
|
||||
if not found_iban:
|
||||
create_build_ibans = BuildIbans.create(iban=account_records_iban.iban, start_date=str(arrow.now().shift(days=-1)))
|
||||
create_build_ibans.save()
|
||||
else:
|
||||
update_dict = {"build_id": found_iban.build_id, "build_uu_id": str(found_iban.build_uu_id)}
|
||||
session.query(AccountRecords).filter(AccountRecords.iban == account_records_iban.iban).update(update_dict, synchronize_session=False)
|
||||
session.commit()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("Account Records Service is running...")
|
||||
with AccountRecords.new_session() as session:
|
||||
account_find_build_from_iban(session=session)
|
||||
print("Account Records Service is finished...")
|
||||
93
trash/XBuildLivingSpace/.dockerignore
Normal file
93
trash/XBuildLivingSpace/.dockerignore
Normal file
@@ -0,0 +1,93 @@
|
||||
# Git
|
||||
.git
|
||||
.gitignore
|
||||
.gitattributes
|
||||
|
||||
|
||||
# CI
|
||||
.codeclimate.yml
|
||||
.travis.yml
|
||||
.taskcluster.yml
|
||||
|
||||
# Docker
|
||||
docker-compose.yml
|
||||
service_app/Dockerfile
|
||||
.docker
|
||||
.dockerignore
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
**/__pycache__/
|
||||
**/*.py[cod]
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
service_app/env/
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.coverage
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Virtual environment
|
||||
service_app/.env
|
||||
.venv/
|
||||
venv/
|
||||
|
||||
# PyCharm
|
||||
.idea
|
||||
|
||||
# Python mode for VIM
|
||||
.ropeproject
|
||||
**/.ropeproject
|
||||
|
||||
# Vim swap files
|
||||
**/*.swp
|
||||
|
||||
# VS Code
|
||||
.vscode/
|
||||
|
||||
test_application/
|
||||
|
||||
|
||||
33
trash/XBuildLivingSpace/Dockerfile
Normal file
33
trash/XBuildLivingSpace/Dockerfile
Normal file
@@ -0,0 +1,33 @@
|
||||
FROM python:3.12-slim

WORKDIR /

# Make the app importable from / and keep Python output unbuffered / bytecode-free.
ENV PYTHONPATH=/ PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=1

# Install system dependencies (gcc for builds, cron for scheduling) and Poetry
# in a single layer. The original ran a second `apt-get update && install cron`
# later, which left the apt lists in the image and skipped --no-install-recommends.
RUN apt-get update \
    && apt-get install -y --no-install-recommends gcc cron \
    && rm -rf /var/lib/apt/lists/* \
    && pip install --no-cache-dir poetry

# Copy Poetry configuration
COPY /pyproject.toml ./pyproject.toml

# Configure Poetry and install dependencies with optimizations
RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi --no-root --only main && pip cache purge && rm -rf ~/.cache/pypoetry

# Copy application code
COPY /ServicesBank/Finder/BuildLivingSpace /
COPY /ServicesApi/Schemas /Schemas
COPY /ServicesApi/Controllers /Controllers

# Create log file to grab cron logs
RUN touch /var/log/cron.log

# Make entrypoint and runner scripts executable
RUN chmod +x /entrypoint.sh /run_app.sh

# Entrypoint writes env vars for cron jobs and starts the scheduler
ENTRYPOINT ["/entrypoint.sh"]
|
||||
3
trash/XBuildLivingSpace/README.md
Normal file
3
trash/XBuildLivingSpace/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Docs of Finder
|
||||
|
||||
Finds people, living spaces, and companies from AccountRecords.
|
||||
8
trash/XBuildLivingSpace/configs.py
Normal file
8
trash/XBuildLivingSpace/configs.py
Normal file
@@ -0,0 +1,8 @@
|
||||
class AccountConfig:
    """Static configuration for account-record comment parsing."""

    # Look-back window in days used when scanning records.
    BEFORE_DAY = 30
    # Category keyword variants (including common misspellings) matched by
    # the comment parser; keys are the canonical category names.
    CATEGORIES = {
        "DAIRE": ["daire", "dagire", "daare", "nolu daire", "no", "nolu dairenin"],
        "APARTMAN": ["apartman", "aparman", "aprmn"],
        "VILLA": ["villa", "vlla"],
        "BINA": ["bina", "binna"],
    }
|
||||
30
trash/XBuildLivingSpace/entrypoint.sh
Normal file
30
trash/XBuildLivingSpace/entrypoint.sh
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/bin/bash

# Write the environment file consumed by cron jobs (run_app.sh sources it).
# Truncate first: the original appended with >>, so every container restart
# duplicated all entries in /env.sh.
: > /env.sh
echo "POSTGRES_USER=\"$POSTGRES_USER\"" >> /env.sh
echo "POSTGRES_PASSWORD=\"$POSTGRES_PASSWORD\"" >> /env.sh
echo "POSTGRES_DB=\"$POSTGRES_DB\"" >> /env.sh
echo "POSTGRES_HOST=\"$POSTGRES_HOST\"" >> /env.sh
echo "POSTGRES_PORT=$POSTGRES_PORT" >> /env.sh
echo "POSTGRES_ENGINE=\"$POSTGRES_ENGINE\"" >> /env.sh
echo "POSTGRES_POOL_PRE_PING=\"$POSTGRES_POOL_PRE_PING\"" >> /env.sh
echo "POSTGRES_POOL_SIZE=$POSTGRES_POOL_SIZE" >> /env.sh
echo "POSTGRES_MAX_OVERFLOW=$POSTGRES_MAX_OVERFLOW" >> /env.sh
echo "POSTGRES_POOL_RECYCLE=$POSTGRES_POOL_RECYCLE" >> /env.sh
echo "POSTGRES_POOL_TIMEOUT=$POSTGRES_POOL_TIMEOUT" >> /env.sh
echo "POSTGRES_ECHO=\"$POSTGRES_ECHO\"" >> /env.sh

# Add Python environment variables
echo "PYTHONPATH=/" >> /env.sh
echo "PYTHONUNBUFFERED=1" >> /env.sh
echo "PYTHONDONTWRITEBYTECODE=1" >> /env.sh

# Install the schedule: run the app every 15 minutes, logging to the cron log.
echo "*/15 * * * * /run_app.sh >> /var/log/cron.log 2>&1" > /tmp/crontab_list
crontab /tmp/crontab_list

# Start cron in the background...
cron

# ...and keep the container alive by tailing the log file.
tail -f /var/log/cron.log
|
||||
319
trash/XBuildLivingSpace/parser.py
Normal file
319
trash/XBuildLivingSpace/parser.py
Normal file
@@ -0,0 +1,319 @@
|
||||
import re
|
||||
import textdistance
|
||||
|
||||
from unidecode import unidecode
|
||||
from gc import garbage
|
||||
from Schemas import AccountRecords, People, Build, Companies, BuildIbanDescription
|
||||
from regex_func import category_finder
|
||||
from validations import Similarity
|
||||
|
||||
|
||||
def parse_comment_to_split_with_star(account_record):
    """Split a record's process comment on '*' separators.

    Accepts either a plain dict or an ORM-style object carrying a
    ``process_comment`` field. Returns a tuple whose first element is the
    number of segments, followed by the segments themselves. On any
    failure a safe default of ``(1, "")`` is returned.
    """
    try:
        if isinstance(account_record, dict):
            raw_comment = str(account_record.get('process_comment', ''))
        else:
            raw_comment = str(account_record.process_comment)

        if "*" not in raw_comment:
            return 1, raw_comment
        segments = raw_comment.replace("**", "*").split("*")
        return (len(segments), *segments)
    except Exception:
        # Defensive default: never let a malformed record break the caller.
        return 1, ""
|
||||
|
||||
|
||||
def remove_garbage_words(comment: str, garbage_word: str):
    """Strip every token of *garbage_word* out of *comment*.

    '*' separators in both inputs are treated as spaces; the result is
    ASCII-transliterated, space-normalized and upper-cased.
    """
    result = remove_spaces_from_string(comment.replace("*", " "))
    if garbage_word:
        normalized_garbage = remove_spaces_from_string(garbage_word.replace("*", " "))
        for token in normalized_garbage.split(" "):
            result = unidecode(remove_spaces_from_string(result))
            result = result.replace(remove_spaces_from_string(token), "")
    return str(remove_spaces_from_string(result)).upper()
|
||||
|
||||
|
||||
def remove_spaces_from_string(remove_string: str):
    """Collapse runs of space characters in *remove_string* and upper-case
    the result; leading/trailing spaces are dropped."""
    words = [word for word in remove_string.split(" ") if word]
    return " ".join(words).upper()
|
||||
|
||||
|
||||
def get_garbage_words(comment: str, search_word: str):
    """Return the upper-cased leftovers of *comment* after removing every
    token of *search_word*, or None when nothing remains."""
    leftovers = unidecode(remove_spaces_from_string(comment))
    normalized_search = unidecode(remove_spaces_from_string(search_word))
    for token in normalized_search.split(" "):
        leftovers = leftovers.replace(remove_spaces_from_string(unidecode(token)), "")
    cleaned = remove_spaces_from_string(leftovers)
    if cleaned:
        return str(unidecode(cleaned)).upper()
    return None
|
||||
|
||||
|
||||
def parse_comment_with_name_iban_description(account_record):
    """Match an account record's comment against the stored search words
    for its IBAN (BuildIbanDescription) and return the best Similarity.

    Accepts a dict or an ORM-style object with ``iban`` and
    ``process_comment``; a zero-valued Similarity is returned when the
    record's attributes cannot be read.
    """
    # Extract plain values up front so we never touch a possibly detached
    # ORM object later in the function.
    if isinstance(account_record, dict):
        iban = account_record.get('iban', '')
        process_comment = account_record.get('process_comment', '')
    else:
        try:
            iban = account_record.iban
            process_comment = account_record.process_comment
        except Exception:
            return Similarity(similarity=0.0, garbage="", cleaned="")

    # Comments may hold several segments separated by '*'.
    if "*" in process_comment:
        comments_list = str(process_comment.replace("**", "*")).split("*")
    else:
        comments_list = [process_comment]

    with BuildIbanDescription.new_session() as session:
        BuildIbanDescription.set_session(session)
        Companies.set_session(session)

        iban_results = BuildIbanDescription.query.filter(BuildIbanDescription.iban == iban).all()
        best_similarity = Similarity(similarity=0.0, garbage="", cleaned="")

        for comment in comments_list:
            for iban_result in iban_results:
                search_word = unidecode(iban_result.search_word)
                garbage_words = get_garbage_words(comment, search_word)
                cleaned_comment = remove_garbage_words(comment, garbage_words)
                similarity_ratio = textdistance.jaro_winkler(cleaned_comment, search_word)
                if float(similarity_ratio) > float(best_similarity.similarity):
                    # Only fetch the company when this candidate actually wins;
                    # the original queried it for every candidate pair.
                    company = Companies.query.filter_by(id=iban_result.company_id).first()
                    best_similarity = Similarity(similarity=similarity_ratio, garbage=garbage_words, cleaned=cleaned_comment)
                    best_similarity.set_company(company)
                    best_similarity.set_found_from("Customer Public Name Description")
        return best_similarity
|
||||
|
||||
|
||||
def parse_comment_for_build_parts(comment: str, max_build_part: int = 200, parse: str = "DAIRE"):
    """Extract building-part numbers of category *parse* from *comment*.

    Returns a list of ints not exceeding *max_build_part*, or None when
    nothing usable was found.
    """
    category_hits = category_finder(comment)
    part_numbers = []
    for hit in category_hits[parse] or []:
        digits = "".join(ch for ch in str(hit) if ch.isdigit())
        if digits and int(digits) <= int(max_build_part):
            part_numbers.append(int(digits))
    return part_numbers or None
|
||||
|
||||
|
||||
def parse_comment_with_name(account_record, living_space_dict: dict = None):
    """Find the best person/company match for an account record's comment.

    Positive amounts are matched against the living-space people of the
    record's IBAN; non-positive amounts against non-commercial companies.
    Returns a Similarity (zero-valued when the record cannot be read).
    """
    # Pull plain values out of the record to avoid ORM session detachment.
    if isinstance(account_record, dict):
        iban = account_record.get('iban', '')
        process_comment = account_record.get('process_comment', '')
        try:
            currency_value = int(account_record.get('currency_value', 0))
        except (ValueError, TypeError):
            currency_value = 0
    else:
        try:
            iban = account_record.iban
            process_comment = account_record.process_comment
            currency_value = int(account_record.currency_value)
        except Exception:
            return Similarity(similarity=0.0, garbage="", cleaned="")

    # Comments may hold several segments separated by '*'.
    if "*" in process_comment:
        comments_list = str(process_comment.replace("**", "*")).split("*")
    else:
        comments_list = [process_comment]
    comments_length = len(comments_list)

    best_similarity = Similarity(similarity=0.0, garbage="", cleaned="")

    if currency_value > 0:
        # Money coming in: the build receives from living-space people.
        living_space_matches = dict(living_space_dict=living_space_dict, iban=iban, whole_comment=process_comment)
        if comments_length == 1:
            best_similarity = parse_comment_for_living_space(iban=iban, comment=comments_list[0], living_space_dict=living_space_dict)
            best_similarity.set_send_person_id(best_similarity.customer_id)
            living_space_matches["best_similarity"] = best_similarity
            return check_build_living_space_matches_with_build_parts(**living_space_matches)
        for comment in comments_list:
            candidate = parse_comment_for_living_space(iban=iban, comment=comment, living_space_dict=living_space_dict)
            if float(candidate.similarity) > float(best_similarity.similarity):
                best_similarity = candidate
        living_space_matches["best_similarity"] = best_similarity
        return check_build_living_space_matches_with_build_parts(**living_space_matches)

    # Money going out: the build pays a company or an individual.
    if comments_length == 1:
        best_similarity = parse_comment_for_company_or_individual(comment=comments_list[0])
        best_similarity.set_send_person_id(best_similarity.customer_id)
        return best_similarity
    for comment in comments_list:
        candidate = parse_comment_for_company_or_individual(comment=comment)
        if float(candidate.similarity) > float(best_similarity.similarity):
            best_similarity = candidate
    return best_similarity
|
||||
|
||||
|
||||
def check_build_living_space_matches_with_build_parts(living_space_dict: dict, best_similarity: Similarity, iban: str, whole_comment: str):
    """Boost a mid-confidence living-space match when the comment also
    names the matching building part number.

    Only similarities strictly between 0.6 and 0.8 are considered; the
    boost halves the remaining distance to 1.0.
    """
    if not (0.6 < float(best_similarity.similarity) < 0.8):
        return best_similarity

    build_parts_data = living_space_dict[iban]["build_parts"]
    living_space_id = getattr(best_similarity, 'living_space_id', None)
    if not living_space_id:
        return best_similarity

    # Locate the living-space record the similarity points at.
    living_space_data = next(
        (ls for ls in living_space_dict[iban]["living_space"] if ls.get('id') == living_space_id),
        None,
    )
    if not living_space_data:
        return best_similarity

    build_parts_id = living_space_data.get('build_parts_id')
    results_list = parse_comment_for_build_parts(comment=str(whole_comment), max_build_part=len(build_parts_data))
    if not results_list:
        return best_similarity

    for build_part_data in build_parts_data:
        part_no = build_part_data.get('part_no')
        if part_no is None:
            # Fall back to parsing a "part N"-style name when part_no is absent.
            name = build_part_data.get('name', '')
            if name and isinstance(name, str) and 'part' in name.lower():
                try:
                    part_no = int(name.lower().replace('part', '').strip())
                except (ValueError, TypeError):
                    pass
        if part_no is not None and build_part_data.get('id') == build_parts_id and part_no in results_list:
            similarity = float(best_similarity.similarity)
            best_similarity.set_similarity((1 - similarity) / 2 + similarity)
            break
    return best_similarity
|
||||
|
||||
|
||||
def parse_comment_for_company_or_individual(comment: str):
    """Match *comment* against non-commercial company public names and
    return the best Similarity with the winning company id attached."""
    # Snapshot company data inside the session so nothing is accessed
    # after the session closes (avoids detached-instance errors).
    companies_data = []
    with Companies.new_session() as session:
        Companies.set_session(session)
        for company in Companies.query.filter(Companies.commercial_type != "Commercial").all():
            snapshot = {
                'id': company.id,
                'public_name': unidecode(company.public_name),
            }
            if hasattr(company, 'commercial_type'):
                snapshot['commercial_type'] = company.commercial_type
            companies_data.append(snapshot)

    # Score each company outside the session.
    comment = unidecode(comment)
    best_similarity = Similarity(similarity=0.0, garbage="", cleaned="")

    for snapshot in companies_data:
        search_word = snapshot['public_name']
        garbage_words = get_garbage_words(comment, search_word)
        cleaned_comment = remove_garbage_words(comment, garbage_words)
        similarity_ratio = textdistance.jaro_winkler(cleaned_comment, search_word)
        if similarity_ratio > float(best_similarity.similarity):
            best_similarity = Similarity(similarity=similarity_ratio, garbage=garbage_words, cleaned=cleaned_comment)
            # Store the id rather than the (soon to be detached) ORM object.
            best_similarity.set_company_id(snapshot['id'])
            best_similarity.set_found_from("Customer Public Name")
    return best_similarity
|
||||
|
||||
|
||||
def parse_comment_for_living_space(iban: str, comment: str, living_space_dict: dict = None) -> Similarity:
    """Match *comment* against the people living at *iban* and return the
    best Similarity found.

    living_space_dict maps iban -> {"people": [...], "living_space": [...],
    "build_parts": [...]} of plain dicts — assumption inferred from the
    .get() access pattern here; TODO confirm against the caller that builds it.
    """
    comment = unidecode(comment)
    best_similarity = Similarity(similarity=0.0, garbage="", cleaned="")

    if iban not in living_space_dict:
        return best_similarity

    for person_data in living_space_dict[iban]["people"]:
        first_name = unidecode(person_data.get('name', '')).upper()
        last_name = unidecode(person_data.get('surname', '')).upper()
        # Try both "FIRST LAST" and "LAST FIRST" orderings; middle names are
        # not present in the dictionary data.
        search_word_list = [
            remove_spaces_from_string(f"{first_name} {last_name}"),
            remove_spaces_from_string(f"{last_name} {first_name}"),
        ]

        cleaned_comment = unidecode(comment).upper()
        for search_word in search_word_list:
            garbage_words = get_garbage_words(comment, unidecode(search_word))
            if garbage_words:
                garbage_words = unidecode(garbage_words).upper()
                cleaned_comment = unidecode(remove_garbage_words(comment, garbage_words)).upper()
            similarity_ratio = textdistance.jaro_winkler(cleaned_comment, str(search_word).upper())
            if len(cleaned_comment) < len(f"{first_name}{last_name}"):
                # Comment is shorter than the bare name — cannot be a real match.
                continue

            if similarity_ratio > float(best_similarity.similarity):
                person_id = person_data['id']
                for living_space_data in living_space_dict[iban]["living_space"]:
                    if living_space_data.get('person_id') == person_id:
                        best_similarity.set_living_space_id(living_space_data.get('id'))
                        best_similarity.set_found_from("Person Name")
                        best_similarity.set_similarity(similarity_ratio)
                        best_similarity.set_garbage(garbage_words)
                        best_similarity.set_cleaned(cleaned_comment)
                        best_similarity.set_customer_id(person_data['id'])
                        # Attach the matching build part, if any.
                        build_parts_id = living_space_data.get('build_parts_id')
                        for build_part_data in living_space_dict[iban]["build_parts"]:
                            if build_part_data.get('id') == build_parts_id:
                                best_similarity.set_build_part_id(build_part_data.get('id'))
                                break
    return best_similarity
|
||||
23
trash/XBuildLivingSpace/regex_func.py
Normal file
23
trash/XBuildLivingSpace/regex_func.py
Normal file
@@ -0,0 +1,23 @@
|
||||
import re
|
||||
|
||||
from difflib import get_close_matches
|
||||
from configs import AccountConfig
|
||||
|
||||
|
||||
def word_straighten(word, ref_list, threshold=0.8):
    """Return the closest match to *word* from *ref_list*, or *word*
    itself when nothing scores at or above *threshold*."""
    candidates = get_close_matches(word, ref_list, n=1, cutoff=threshold)
    if candidates:
        return candidates[0]
    return word
|
||||
|
||||
|
||||
def category_finder(text, output_template="{kategori} {numara}"):
    """Scan *text* for category keywords followed by a number.

    Misspelled keywords are first straightened against each category's
    known variants; returns a dict mapping every category name to a list
    of formatted "<category> <number>" strings (empty when none found).
    """
    categories = AccountConfig.CATEGORIES
    found = {name: [] for name in categories}
    for name, patterns in categories.items():
        tokens = re.split(r"\W+", text)
        normalized_text = " ".join(word_straighten(token, patterns) for token in tokens)
        number_pattern = (
            r"(?:\b|\s|^)(?:" + "|".join(map(re.escape, patterns)) + r")(?:\s*|:|\-|\#)*(\d+)(?:\b|$)"
        )
        matches = re.findall(number_pattern, normalized_text, re.IGNORECASE)
        if matches:
            formatted = [output_template.format(kategori=name, numara=num) for num in matches]
            found[name].extend(entry for entry in formatted if str(entry).replace(" ", ""))
    return found
|
||||
26
trash/XBuildLivingSpace/run_app.sh
Normal file
26
trash/XBuildLivingSpace/run_app.sh
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash

# Load the settings written by entrypoint.sh, then re-export everything the
# runner needs so the Python process started from cron inherits them.
. /env.sh

for var_name in POSTGRES_USER POSTGRES_PASSWORD POSTGRES_DB POSTGRES_HOST \
    POSTGRES_PORT POSTGRES_ENGINE POSTGRES_POOL_PRE_PING POSTGRES_POOL_SIZE \
    POSTGRES_MAX_OVERFLOW POSTGRES_POOL_RECYCLE POSTGRES_POOL_TIMEOUT \
    POSTGRES_ECHO PYTHONPATH PYTHONUNBUFFERED PYTHONDONTWRITEBYTECODE; do
    export "$var_name"
done

# Run the Python script
/usr/local/bin/python /runner.py
|
||||
187
trash/XBuildLivingSpace/runner.py
Normal file
187
trash/XBuildLivingSpace/runner.py
Normal file
@@ -0,0 +1,187 @@
|
||||
from Schemas import AccountRecords, BuildIbans, BuildDecisionBook, Build, BuildLivingSpace, People, OccupantTypes, BuildParts, BuildDecisionBookPayments, ApiEnumDropdown
|
||||
from Controllers.Postgres.engine import get_session_factory
|
||||
from parser import parse_comment_with_name, parse_comment_with_name_iban_description
|
||||
from validations import Similarity
|
||||
import re
|
||||
import time
|
||||
from datetime import timedelta
|
||||
|
||||
|
||||
def account_save_search_result(account_record_main_session: AccountRecords, similarity_result: Similarity):
    """Persist a parser match onto its AccountRecords row.

    Re-fetches the account record in a fresh session (the caller's row is
    bound to a different session), resolves the entities referenced by
    ``similarity_result`` (company, living space, build part, building,
    customer), and stamps their ids/uu_ids onto the record before saving.

    Parameters:
        account_record_main_session: record loaded in the caller's session;
            only its ``id`` is used here.
        similarity_result: parser output carrying the matched entity ids.
    """
    with AccountRecords.new_session() as session:
        # Bind every model touched below to this fresh session.
        AccountRecords.set_session(session)
        BuildParts.set_session(session)
        Build.set_session(session)
        BuildLivingSpace.set_session(session)
        People.set_session(session)

        # Reload the row inside this session; bail out if it vanished.
        account_record = AccountRecords.query.filter_by(id=account_record_main_session.id).first()
        if not account_record:
            # print(f"Could not find account record with ID {account_record_main_session.id}")
            return

        # Matched ids from the parser; getattr because older Similarity
        # instances may lack some of these attributes.
        company_id = getattr(similarity_result, 'company_id', None)
        living_space_id = getattr(similarity_result, 'living_space_id', None)
        build_part_id = getattr(similarity_result, 'build_part_id', None)
        customer_id = getattr(similarity_result, 'customer_id', None)

        part, build, found_customer = None, None, None

        if living_space_id:
            # NOTE: despite the name, this is a BuildLivingSpace row, not a person.
            found_customer = BuildLivingSpace.query.get(living_space_id)
        if build_part_id:
            # An explicit build-part match wins over one derived from the living space.
            part = BuildParts.query.get(build_part_id)
        elif found_customer and hasattr(found_customer, 'build_parts_id'):
            # Fall back to the living space's part, restricted to livable units.
            part = BuildParts.query.filter_by(id=found_customer.build_parts_id, human_livable=True).first()

        if part:
            build = Build.query.filter_by(id=part.build_id).first()

        # Stamp match metadata and resolved ids onto the record.
        account_record.similarity = similarity_result.similarity
        account_record.found_from = similarity_result.found_from
        account_record.company_id = company_id
        if company_id:
            company = People.query.get(company_id)
            account_record.company_uu_id = getattr(company, "uu_id", None) if company else None

        account_record.build_parts_id = getattr(part, "id", None)
        account_record.build_parts_uu_id = getattr(part, "uu_id", None) if part else None

        # Only fill the building if the record didn't already have one.
        if not account_record.build_id and build:
            account_record.build_id = getattr(build, "id", None)
            account_record.build_uu_id = getattr(build, "uu_id", None)

        account_record.living_space_id = living_space_id
        if found_customer:
            account_record.living_space_uu_id = getattr(found_customer, "uu_id", None)
        if customer_id:
            account_record.send_person_id = customer_id
            customer = People.query.get(customer_id)
            if customer:
                account_record.send_person_uu_id = getattr(customer, "uu_id", None)
        account_record.save()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Batch job: for every account record tied to a decision book, try to
    # identify who sent the money by fuzzy-matching the bank comment against
    # the building's residents, then persist positive matches.

    # Start timer
    start_time = time.time()
    print("Build Living Space Service is running...")

    new_session = get_session_factory()
    flat_id_list = []                  # occupant-type ids that count as flat occupants
    build_living_space_dict = {}       # iban -> {"people", "living_space", "build_parts"} lookup cache
    found_list = []                    # all accepted similarity results (for the summary line)
    account_records_ibans = []

    # Resolve the four flat occupant-type ids (codes presumably mean
    # resident/owner/tenant/representative — TODO confirm against OccupantTypes data).
    with OccupantTypes.new_session() as occupant_types_session:
        OccupantTypes.set_session(occupant_types_session)
        flat_resident = OccupantTypes.query.filter_by(occupant_category_type="FL", occupant_code="FL-RES").first()
        flat_owner = OccupantTypes.query.filter_by(occupant_category_type="FL", occupant_code="FL-OWN").first()
        flat_tenant = OccupantTypes.query.filter_by(occupant_category_type="FL", occupant_code="FL-TEN").first()
        flat_represent = OccupantTypes.query.filter_by(occupant_category_type="FL", occupant_code="FL-REP").first()
        # NOTE(review): raises AttributeError if any code is missing from the table.
        flat_id_list = [flat_resident.id, flat_owner.id, flat_tenant.id, flat_represent.id]

    # Build the per-IBAN lookup cache: one distinct record per IBAN is enough
    # to discover the building and its occupants.
    AccountRecords.set_session(new_session)
    BuildLivingSpace.set_session(new_session)
    BuildParts.set_session(new_session)
    People.set_session(new_session)
    account_records_ibans = AccountRecords.query.filter(AccountRecords.build_decision_book_id != None).distinct(AccountRecords.iban).all()

    for account_records_iban in account_records_ibans:
        if account_records_iban.iban not in build_living_space_dict:
            # Livable units of the building this IBAN's record points at.
            build_parts = BuildParts.query.filter_by(build_id=account_records_iban.build_id, human_livable=True).all()
            build_parts_data = []
            for bp in build_parts:
                bp_dict = {'id': bp.id, 'build_id': bp.build_id, 'human_livable': bp.human_livable}
                if hasattr(bp, 'part_no'):
                    bp_dict['part_no'] = bp.part_no
                build_parts_data.append(bp_dict)

            # Occupancies of those units, restricted to flat occupant types.
            living_spaces = BuildLivingSpace.query.filter(
                BuildLivingSpace.build_parts_id.in_([bp.id for bp in build_parts]), BuildLivingSpace.occupant_type_id.in_(flat_id_list),
            ).all()
            living_spaces_data = []
            for ls in living_spaces:
                ls_dict = {'id': ls.id, 'build_parts_id': ls.build_parts_id, 'occupant_type_id': ls.occupant_type_id, 'person_id': ls.person_id}
                if hasattr(ls, 'name'):
                    ls_dict['name'] = ls.name
                living_spaces_data.append(ls_dict)

            # People behind those occupancies, flattened to plain dicts so the
            # parser can work without live ORM objects.
            living_spaces_people = [ls.person_id for ls in living_spaces if ls.person_id]
            people_list = People.query.filter(People.id.in_(living_spaces_people)).all()
            people_data = []
            for p in people_list:
                p_dict = {'id': p.id, 'name': p.firstname, 'surname': p.surname, 'middle_name': p.middle_name}
                p_dict['full_name'] = f"{p.firstname} {p.surname}".strip()
                people_data.append(p_dict)
            build_living_space_dict[str(account_records_iban.iban)] = {"people": people_data, "living_space": living_spaces_data, "build_parts": build_parts_data}

    # Collect target record ids up front so each record can then be processed
    # in its own short-lived session.
    with AccountRecords.new_session() as query_session:
        AccountRecords.set_session(query_session)
        account_record_ids = [record.id for record in AccountRecords.query.filter(AccountRecords.build_decision_book_id != None).order_by(AccountRecords.bank_date.desc()).all()]

    for account_id in account_record_ids:
        with AccountRecords.new_session() as record_session:
            AccountRecords.set_session(record_session)
            account_record = AccountRecords.query.filter_by(id=account_id).first()
            if not account_record:
                continue
            # Copy scalars out of the ORM object before the session closes.
            account_iban = account_record.iban
            account_process_comment = account_record.process_comment
            account_currency_value = account_record.currency_value
            account_similarity_value = float(account_record.similarity or 0.0)
            account_build_id = account_record.build_id

            account_data = {"id": account_id, "iban": account_iban, "process_comment": account_process_comment, "currency_value": account_currency_value,
                            "similarity": account_similarity_value, "build_id": account_build_id}

            try:
                # First pass: match the comment against the building's known people.
                similarity_result = parse_comment_with_name(account_record=account_data, living_space_dict=build_living_space_dict)
                fs = float(similarity_result.similarity)

                # Accept only confident matches that don't worsen the stored score.
                # NOTE(review): this branch uses >= against the stored score, the
                # fallback branch below uses > — confirm whether that asymmetry is intended.
                if fs >= 0.8 and fs >= account_similarity_value:
                    found_list.append(similarity_result)
                    with AccountRecords.new_session() as save_session:
                        AccountRecords.set_session(save_session)
                        fresh_account = AccountRecords.query.filter_by(id=account_id).first()
                        if fresh_account:
                            account_save_search_result(account_record_main_session=fresh_account, similarity_result=similarity_result)
                    print("POSITIVE SIMILARITY RESULT:", {
                        'similarity': similarity_result.similarity, 'found_from': similarity_result.found_from, 'garbage': similarity_result.garbage,
                        'cleaned': similarity_result.cleaned, 'company_id': getattr(similarity_result, 'company_id', None),
                        'living_space_id': getattr(similarity_result, 'living_space_id', None), 'build_part_id': getattr(similarity_result, 'build_part_id', None),
                        'customer_id': getattr(similarity_result, 'customer_id', None)
                    })
                else:
                    # Fallback pass: match against name/IBAN/description instead.
                    similarity_result = parse_comment_with_name_iban_description(account_record=account_data)
                    fs = float(similarity_result.similarity)

                    if fs >= 0.8 and fs > account_similarity_value:
                        found_list.append(similarity_result)
                        with AccountRecords.new_session() as save_session:
                            AccountRecords.set_session(save_session)
                            fresh_account = AccountRecords.query.filter_by(id=account_id).first()
                            if fresh_account:
                                account_save_search_result(account_record_main_session=fresh_account, similarity_result=similarity_result)
                        print("NEGATIVE SIMILARITY RESULT:", {
                            'similarity': similarity_result.similarity, 'found_from': similarity_result.found_from,
                            'garbage': similarity_result.garbage, 'cleaned': similarity_result.cleaned,
                            'company_id': getattr(similarity_result, 'company_id', None), 'living_space_id': getattr(similarity_result, 'living_space_id', None),
                            'build_part_id': getattr(similarity_result, 'build_part_id', None), 'customer_id': getattr(similarity_result, 'customer_id', None)
                        })
            except Exception as e:
                # Best-effort batch: a failure on one record must not stop the run.
                # print(f"Error processing account {account_id}: {e}")
                continue

    # Calculate elapsed time
    end_time = time.time()
    elapsed_time = end_time - start_time
    elapsed_formatted = str(timedelta(seconds=int(elapsed_time)))

    print("Account Records Search : ", len(found_list), "/", len(account_record_ids))
    print(f"Total runtime: {elapsed_formatted} (HH:MM:SS)")
    print(f"Total seconds: {elapsed_time:.2f}")

    new_session.close()
    print("Build Living Space Service is finished...")
|
||||
49
trash/XBuildLivingSpace/validations.py
Normal file
49
trash/XBuildLivingSpace/validations.py
Normal file
@@ -0,0 +1,49 @@
|
||||
from Schemas import BuildLivingSpace, People
|
||||
|
||||
class Similarity:
    """Mutable result of fuzzy-matching a bank-comment against known entities.

    Carries the match score plus whatever the parser managed to resolve:
    a living space, a build part, a company, and/or a sending customer.
    All optional fields start as ``None`` and are filled via the setters.
    """

    def __init__(self, similarity: float, garbage: str, cleaned: str):
        # Core match metadata produced by the parser.
        self.similarity = similarity
        self.garbage = garbage
        self.cleaned = cleaned
        # Resolved entities and their ids — populated later via setters.
        self.living_space = None
        self.living_space_id = None
        self.build_part_id = None
        self.company = None
        self.company_id = None
        self.found_from = None
        self.send_person_id = None
        self.customer_id = None

    # --- match-metadata setters -------------------------------------------

    def set_similarity(self, similarity: float):
        """Overwrite the match score."""
        self.similarity = similarity

    def set_garbage(self, garbage: str):
        """Store the text discarded during cleaning."""
        self.garbage = garbage

    def set_cleaned(self, cleaned: str):
        """Store the normalized comment text."""
        self.cleaned = cleaned

    def set_found_from(self, found_from: str):
        """Record which matching strategy produced this result."""
        self.found_from = found_from

    # --- resolved-entity setters ------------------------------------------

    def set_living_space(self, living_space: BuildLivingSpace):
        """Attach the matched living-space ORM object."""
        self.living_space = living_space

    def set_living_space_id(self, living_space_id: int):
        self.living_space_id = living_space_id

    def set_build_part_id(self, build_part_id: int):
        self.build_part_id = build_part_id

    def set_company(self, company: People):
        """Attach the matched company ORM object."""
        self.company = company

    def set_company_id(self, company_id: int):
        self.company_id = company_id

    def set_send_person_id(self, send_person_id: int):
        self.send_person_id = send_person_id

    def set_customer_id(self, customer_id: int):
        self.customer_id = customer_id
|
||||
93
trash/XDecisionBook/.dockerignore
Normal file
93
trash/XDecisionBook/.dockerignore
Normal file
@@ -0,0 +1,93 @@
|
||||
# Git
|
||||
.git
|
||||
.gitignore
|
||||
.gitattributes
|
||||
|
||||
|
||||
# CI
|
||||
.codeclimate.yml
|
||||
.travis.yml
|
||||
.taskcluster.yml
|
||||
|
||||
# Docker
|
||||
docker-compose.yml
|
||||
service_app/Dockerfile
|
||||
.docker
|
||||
.dockerignore
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
**/__pycache__/
|
||||
**/*.py[cod]
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
service_app/env/
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.coverage
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Virtual environment
|
||||
service_app/.env
|
||||
.venv/
|
||||
venv/
|
||||
|
||||
# PyCharm
|
||||
.idea
|
||||
|
||||
# Python mode for VIM
|
||||
.ropeproject
|
||||
**/.ropeproject
|
||||
|
||||
# Vim swap files
|
||||
**/*.swp
|
||||
|
||||
# VS Code
|
||||
.vscode/
|
||||
|
||||
test_application/
|
||||
|
||||
|
||||
33
trash/XDecisionBook/Dockerfile
Normal file
33
trash/XDecisionBook/Dockerfile
Normal file
@@ -0,0 +1,33 @@
|
||||
FROM python:3.12-slim

WORKDIR /

# Set Python path to include app directory
ENV PYTHONPATH=/ PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=1

# Install system dependencies (gcc for building wheels, cron for scheduling)
# and Poetry in ONE layer. The original installed cron in a second
# `apt-get update && apt-get install -y cron` layer without
# --no-install-recommends or list cleanup, which re-downloaded the package
# index and left /var/lib/apt/lists baked into the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends gcc cron \
    && rm -rf /var/lib/apt/lists/* \
    && pip install --no-cache-dir poetry

# Copy Poetry configuration
COPY /pyproject.toml ./pyproject.toml

# Configure Poetry and install dependencies with optimizations
RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi --no-root --only main && pip cache purge && rm -rf ~/.cache/pypoetry

# Copy application code
COPY /ServicesBank/Finder/DecisionBook /
COPY /ServicesApi/Schemas /Schemas
COPY /ServicesApi/Controllers /Controllers

# Create log file to grab cron logs
RUN touch /var/log/cron.log

# Make entrypoint script executable
RUN chmod +x /entrypoint.sh
RUN chmod +x /run_app.sh

# Use entrypoint script to update run_app.sh with environment variables and start cron
ENTRYPOINT ["/entrypoint.sh"]
|
||||
3
trash/XDecisionBook/README.md
Normal file
3
trash/XDecisionBook/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Docs of Finder
|
||||
|
||||
Finds people, living spaces, and companies referenced by AccountRecords entries.
|
||||
30
trash/XDecisionBook/entrypoint.sh
Normal file
30
trash/XDecisionBook/entrypoint.sh
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/bin/bash
# Container entrypoint: snapshot the container's environment into /env.sh for
# cron jobs (cron strips the environment), register the schedule, start cron,
# and keep PID 1 alive by tailing the cron log.

# Truncate /env.sh first. The original used >> only, so every container
# restart appended a duplicate copy of the whole variable list.
: > /env.sh

# Database connection / pool settings consumed by the Python service.
echo "POSTGRES_USER=\"$POSTGRES_USER\"" >> /env.sh
echo "POSTGRES_PASSWORD=\"$POSTGRES_PASSWORD\"" >> /env.sh
echo "POSTGRES_DB=\"$POSTGRES_DB\"" >> /env.sh
echo "POSTGRES_HOST=\"$POSTGRES_HOST\"" >> /env.sh
echo "POSTGRES_PORT=$POSTGRES_PORT" >> /env.sh
echo "POSTGRES_ENGINE=\"$POSTGRES_ENGINE\"" >> /env.sh
echo "POSTGRES_POOL_PRE_PING=\"$POSTGRES_POOL_PRE_PING\"" >> /env.sh
echo "POSTGRES_POOL_SIZE=$POSTGRES_POOL_SIZE" >> /env.sh
echo "POSTGRES_MAX_OVERFLOW=$POSTGRES_MAX_OVERFLOW" >> /env.sh
echo "POSTGRES_POOL_RECYCLE=$POSTGRES_POOL_RECYCLE" >> /env.sh
echo "POSTGRES_POOL_TIMEOUT=$POSTGRES_POOL_TIMEOUT" >> /env.sh
echo "POSTGRES_ECHO=\"$POSTGRES_ECHO\"" >> /env.sh

# Python runtime settings for the cron-launched process.
echo "PYTHONPATH=/" >> /env.sh
echo "PYTHONUNBUFFERED=1" >> /env.sh
echo "PYTHONDONTWRITEBYTECODE=1" >> /env.sh

# Register the job: run the worker every 15 minutes, appending to the cron log.
echo "*/15 * * * * /run_app.sh >> /var/log/cron.log 2>&1" > /tmp/crontab_list
crontab /tmp/crontab_list
rm -f /tmp/crontab_list

# Start the cron daemon (forks into the background).
cron

# Stream job output to the container's stdout and keep the container running.
tail -f /var/log/cron.log
|
||||
26
trash/XDecisionBook/run_app.sh
Normal file
26
trash/XDecisionBook/run_app.sh
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash
# Cron-launched wrapper: load the DB/Python settings written by the
# container entrypoint, then start the worker.

. /env.sh

# cron starts jobs with a minimal environment, so every variable the
# Python process needs must be exported here explicitly.
export POSTGRES_USER POSTGRES_PASSWORD POSTGRES_DB POSTGRES_HOST \
       POSTGRES_PORT POSTGRES_ENGINE POSTGRES_POOL_PRE_PING \
       POSTGRES_POOL_SIZE POSTGRES_MAX_OVERFLOW POSTGRES_POOL_RECYCLE \
       POSTGRES_POOL_TIMEOUT POSTGRES_ECHO

# Python runtime settings.
export PYTHONPATH PYTHONUNBUFFERED PYTHONDONTWRITEBYTECODE

# Debug aid: uncomment to dump the effective environment into the cron log.
# env >> /var/log/cron.log
/usr/local/bin/python /runner.py
|
||||
29
trash/XDecisionBook/runner.py
Normal file
29
trash/XDecisionBook/runner.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from sqlalchemy import cast, Date
|
||||
from Schemas import AccountRecords, BuildIbans, BuildDecisionBook
|
||||
|
||||
|
||||
def account_records_find_decision_book(session):
    """Link build-matched account records to the decision book active on their bank date.

    Scans AccountRecords rows that already have a ``build_id`` but no
    ``build_decision_book_id``, resolves each record's IBAN to a building via
    BuildIbans, then finds the BuildDecisionBook whose expiry window
    (date precision — time of day is ignored via the Date cast) contains the
    record's ``bank_date`` and stamps its id/uu_id onto the record.

    Parameters:
        session: an open session shared by all three models used here.
    """
    AccountRecords.set_session(session)
    BuildIbans.set_session(session)
    BuildDecisionBook.set_session(session)

    # NOTE: dropped the original's unused `created_ibans` / `iban_build_dict`
    # locals — they were initialized but never read or written again.

    # Only records tied to a building but not yet to a decision book.
    filter_account_records = AccountRecords.build_id != None, AccountRecords.build_decision_book_id == None
    account_records_list: list[AccountRecords] = AccountRecords.query.filter(*filter_account_records).order_by(AccountRecords.bank_date.desc()).all()
    for account_record in account_records_list:
        if found_iban := BuildIbans.query.filter(BuildIbans.iban == account_record.iban).first():
            # A book applies when its validity window covers the bank date.
            if found_decision_book := BuildDecisionBook.query.filter(
                BuildDecisionBook.build_id == found_iban.build_id,
                cast(BuildDecisionBook.expiry_starts, Date) <= cast(account_record.bank_date, Date),
                cast(BuildDecisionBook.expiry_ends, Date) >= cast(account_record.bank_date, Date),
            ).first():
                account_record.build_decision_book_id = found_decision_book.id
                account_record.build_decision_book_uu_id = str(found_decision_book.uu_id)
                account_record.save()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Entry point: run a single linking pass inside a fresh session.
    print("DecisionBook Service is running...")
    with AccountRecords.new_session() as session:
        account_records_find_decision_book(session)
    print("DecisionBook Service is finished...")
|
||||
Reference in New Issue
Block a user