Debugging scripts import hell.
@@ -21,10 +21,10 @@ def set_sqlite_pragma(dbapi_connection, connection_record):
     if ctx.database_schema == "sqlite":
         execution_phrase = "PRAGMA foreign_keys=ON"
     else:
-        print("Nothing to execute, returning")
+        # print("Nothing to execute, returning")
         cursor.close()
         return
-    print(f"Executing {execution_phrase} in sql.")
+    print(f"Executing '{execution_phrase}' in sql.")
     cursor.execute(execution_phrase)
     cursor.close()
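
Note: set_sqlite_pragma has the usual shape of a SQLAlchemy connect-event hook. A minimal sketch of that pattern, assuming the standard sqlalchemy.event API (the engine URL is a placeholder, not taken from this repo):

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite:///submissions.db")  # placeholder URL

    @event.listens_for(engine, "connect")
    def set_sqlite_pragma(dbapi_connection, connection_record):
        # SQLite ships with foreign-key enforcement off; enable it per connection.
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()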
@@ -34,7 +34,7 @@ from .models import *

 def update_log(mapper, connection, target):
     state = inspect(target)
-    object_name = state.object.truncated_name()
+    object_name = state.object.truncated_name
     update = dict(user=getuser(), time=datetime.now(), object=object_name, changes=[])
     for attr in state.attrs:
         hist = attr.load_history()
@@ -24,6 +24,7 @@ logger = logging.getLogger(f"submissions.{__name__}")
 class LogMixin(Base):
     __abstract__ = True

+    @property
     def truncated_name(self):
         name = str(self)
         if len(name) > 64:
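
Note: these two hunks go together: truncated_name becomes a @property on LogMixin, so update_log reads state.object.truncated_name without calling it. A minimal sketch of the idea (the 64-character cutoff is from the diff; the ellipsis suffix is an assumption):

    class LogMixin:
        @property
        def truncated_name(self) -> str:
            # Attribute-style access; callers drop the parentheses.
            name = str(self)
            return name[:64] + "..." if len(name) > 64 else name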
@@ -368,7 +368,7 @@ class IridaControl(Control):
         polymorphic_load="inline",
         inherit_condition=(id == Control.id))

-    @validates("sub_type")
+    @validates("subtype")
     def enforce_subtype_literals(self, key: str, value: str) -> str:
         """
         Validates sub_type field with acceptable values
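
Note: SQLAlchemy's @validates must name the mapped attribute exactly as declared, so the rename to "subtype" implies the column is declared as subtype. A sketch of the mechanism (the allowed literals are hypothetical):

    from sqlalchemy.orm import validates

    class IridaControl(Control):
        @validates("subtype")
        def enforce_subtype_literals(self, key: str, value: str) -> str:
            # Runs on every assignment to .subtype; key is the attribute name.
            allowed = ("EN-NOS", "MCS-NOS")  # hypothetical acceptable values
            if value not in allowed:
                raise ValueError(f"{key} must be one of {allowed}, got {value!r}")
            return value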
@@ -738,7 +738,13 @@ class SubmissionType(BaseClass):
         return f"<SubmissionType({self.name})>"

     @classmethod
-    def retrieve_template_file(cls):
+    def retrieve_template_file(cls) -> bytes:
+        """
+        Grabs the default excel template file.
+
+        Returns:
+            bytes: The excel sheet.
+        """
         submission_type = cls.query(name="Bacterial Culture")
         return submission_type.template_file
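
Note: with the bytes annotation, callers can treat the template as raw xlsx content. A hedged usage sketch (the output filename is a placeholder):

    from pathlib import Path

    template = SubmissionType.retrieve_template_file()
    # Raw xlsx bytes can be written straight to disk.
    Path("template.xlsx").write_bytes(template)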
@@ -145,6 +145,7 @@ class ReportMaker(object):
             if cell.row > 1:
                 cell.style = 'Currency'
 
+
 class TurnaroundMaker(ReportArchetype):
 
     def __init__(self, start_date: date, end_date: date, submission_type:str):
@@ -1,8 +0,0 @@
-from pathlib import Path
-import importlib
-
-p = Path(__file__).parent.absolute()
-subs = [item.stem for item in p.glob("*.py") if "__" not in item.stem]
-modules = {}
-for sub in subs:
-    importlib.import_module(f"backend.scripts.{sub}")
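
Note: this deleted __init__ imported every sibling module as a side effect of importing the package, which is the likely "import hell" of the commit message: any script that imports back into backend hits a partially initialized package and a circular import. A lazier sketch of the same discovery idea, deferring each import until a script is actually requested (function names are assumptions):

    import importlib
    from pathlib import Path

    def available_scripts() -> list[str]:
        # Enumerate candidate modules without importing any of them.
        p = Path(__file__).parent
        return [item.stem for item in p.glob("*.py") if "__" not in item.stem]

    def load_script(name: str):
        # Import a single script module on demand.
        return importlib.import_module(f"backend.scripts.{name}")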
@@ -1,45 +0,0 @@
-"""
-script meant to copy database data to new file. Currently for Sqlite only
-"""
-import logging, shutil
-from datetime import date
-from pathlib import Path
-from tools import Settings
-import pyodbc
-
-logger = logging.getLogger(f"submissions.{__name__}")
-
-
-def script(ctx: Settings):
-    """
-    Copies the database into the backup directory the first time it is opened every month.
-    """
-    month = date.today().strftime("%Y-%m")
-    current_month_bak = Path(ctx.backup_path).joinpath(f"submissions_backup-{month}").resolve()
-    logger.info(f"Here is the db directory: {ctx.database_path}")
-    logger.info(f"Here is the backup directory: {ctx.backup_path}")
-    match ctx.database_schema:
-        case "sqlite":
-            db_path = ctx.database_path.joinpath(ctx.database_name).with_suffix(".db")
-            current_month_bak = current_month_bak.with_suffix(".db")
-            if not current_month_bak.exists() and "Archives" not in db_path.__str__():
-                logger.info("No backup found for this month, backing up database.")
-                try:
-                    shutil.copyfile(db_path, current_month_bak)
-                except PermissionError as e:
-                    logger.error(f"Couldn't backup database due to: {e}")
-        case "postgresql+psycopg2":
-            logger.warning(f"Backup function not yet implemented for psql")
-            current_month_bak = current_month_bak.with_suffix(".psql")
-        case "mssql+pyodbc":
-            logger.warning(f"{ctx.database_schema} backup is currently experiencing permission issues")
-            current_month_bak = current_month_bak.with_suffix(".bak")
-            return
-            if not current_month_bak.exists():
-                logger.info(f"No backup found for this month, backing up database to {current_month_bak}.")
-                connection = pyodbc.connect(driver='{ODBC Driver 18 for SQL Server}',
-                                            server=f'{ctx.database_path}', database=f'{ctx.database_name}',
-                                            trusted_connection='yes', trustservercertificate="yes", autocommit=True)
-                backup = f"BACKUP DATABASE [{ctx.database_name}] TO DISK = N'{current_month_bak}'"
-                cursor = connection.cursor().execute(backup)
-                connection.close()
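
Note: one detail of the deleted mssql branch worth recording: SQL Server's BACKUP DATABASE statement cannot run inside a transaction, which is why the connection is opened with autocommit=True. A minimal sketch of just that call (server name and paths are placeholders):

    import pyodbc

    conn = pyodbc.connect(driver="{ODBC Driver 18 for SQL Server}",
                          server="dbserver", database="submissions",  # placeholders
                          trusted_connection="yes", trustservercertificate="yes",
                          autocommit=True)  # BACKUP DATABASE refuses to run in a transaction
    conn.execute("BACKUP DATABASE [submissions] TO DISK = N'D:\\backups\\submissions.bak'")
    conn.close()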
@@ -1,5 +0,0 @@
-"""
-Test script for teardown_scripts
-"""
-def script(ctx):
-    print("\n\nGoodbye. Thank you for using Robotics Submission Tracker.\n\n")
@@ -1,5 +0,0 @@
-"""
-Test script for startup_scripts
-"""
-def script(ctx):
-    print("\n\nHello! Welcome to Robotics Submission Tracker.\n\n")
@@ -1,67 +0,0 @@
-import logging, sqlite3, json
-from pprint import pformat, pprint
-from datetime import datetime
-from tools import Settings
-from backend import BasicSample
-from backend.db import IridaControl, ControlType
-from sqlalchemy.orm import Session
-
-logger = logging.getLogger(f"submissions.{__name__}")
-
-
-def script(ctx: Settings):
-    """
-    Grabs Irida controls from secondary database.
-
-    Args:
-        ctx (Settings): Settings inherited from app.
-    """
-    # NOTE: Because the main session will be busy in another thread, this requires a new session.
-    new_session = Session(ctx.database_session.get_bind())
-    # ct = ControlType.query(name="Irida Control")
-    ct = new_session.query(ControlType).filter(ControlType.name == "Irida Control").first()
-    # existing_controls = [item.name for item in IridaControl.query()]
-    existing_controls = [item.name for item in new_session.query(IridaControl)]
-    prm_list = ", ".join([f"'{thing}'" for thing in existing_controls])
-    ctrl_db_path = ctx.directory_path.joinpath("submissions_parser_output", "submissions.db")
-    try:
-        conn = sqlite3.connect(ctrl_db_path)
-    except AttributeError as e:
-        logger.error(f"Error, could not import from irida due to {e}")
-        return
-    sql = "SELECT name, submitted_date, submission_id, contains, matches, kraken, subtype, refseq_version, " \
-          "kraken2_version, kraken2_db_version, sample_id FROM _iridacontrol INNER JOIN _control on _control.id " \
-          f"= _iridacontrol.id WHERE _control.name NOT IN ({prm_list})"
-    cursor = conn.execute(sql)
-    records = [
-        dict(name=row[0], submitted_date=row[1], submission_id=row[2], contains=row[3], matches=row[4], kraken=row[5],
-             subtype=row[6], refseq_version=row[7], kraken2_version=row[8], kraken2_db_version=row[9],
-             sample_id=row[10]) for row in cursor]
-    for record in records:
-        # instance = IridaControl.query(name=record['name'])
-        instance = new_session.query(IridaControl).filter(IridaControl.name == record['name']).first()
-        if instance:
-            logger.warning(f"Irida Control {instance.name} already exists, skipping.")
-            continue
-        for thing in ['contains', 'matches', 'kraken']:
-            if record[thing]:
-                record[thing] = json.loads(record[thing])
-                assert isinstance(record[thing], dict)
-            else:
-                record[thing] = {}
-        # record['matches'] = json.loads(record['matches'])
-        # assert isinstance(record['matches'], dict)
-        # record['kraken'] = json.loads(record['kraken'])
-        # assert isinstance(record['kraken'], dict)
-        record['submitted_date'] = datetime.strptime(record['submitted_date'], "%Y-%m-%d %H:%M:%S.%f")
-        assert isinstance(record['submitted_date'], datetime)
-        instance = IridaControl(controltype=ct, **record)
-        # sample = BasicSample.query(submitter_id=instance.name)
-        sample = new_session.query(BasicSample).filter(BasicSample.submitter_id == instance.name).first()
-        if sample:
-            instance.sample = sample
-            instance.submission = sample.submissions[0]
-        # instance.save()
-        new_session.add(instance)
-        new_session.commit()
-    new_session.close()
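
Note: the session handling in the deleted script is the part worth keeping in mind: because the app's main session is busy in another thread, it binds a second Session to the same engine rather than sharing one across threads. A minimal sketch of that pattern (ctx and instance are stand-ins from the script above):

    from sqlalchemy.orm import Session

    # Sessions are not thread-safe; open a fresh one on the existing engine.
    engine = ctx.database_session.get_bind()
    with Session(engine) as new_session:
        new_session.add(instance)
        new_session.commit()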
@@ -901,7 +901,7 @@ class PydSubmission(BaseModel, extra='allow'):
         return render

     # @report_result
-    def check_kit_integrity(self, extraction_kit: str | dict | None = None, exempt:List[PydReagent]=[]) -> Tuple[
+    def check_kit_integrity(self, extraction_kit: str | dict | None = None, exempt: List[PydReagent] = []) -> Tuple[
         List[PydReagent], Report]:
         """
         Ensures all reagents expected in kit are listed in Submission
@@ -929,16 +929,40 @@ class PydSubmission(BaseModel, extra='allow'):
         missing_reagents = [rt for rt in ext_kit_rtypes if rt.role not in missing_check and rt.role not in exempt]
         # logger.debug(f"Missing reagents: {missing_reagents}")
         missing_reagents += [rt for rt in output_reagents if rt.missing]
+        logger.debug(pformat(missing_reagents))
         output_reagents += [rt for rt in missing_reagents if rt not in output_reagents]
         # NOTE: if lists are equal return no problem
         if len(missing_reagents) == 0:
             result = None
         else:
             result = Result(
-                msg=f"The excel sheet you are importing is missing some reagents expected by the kit.\n\nIt looks like you are missing: {[item.role.upper() for item in missing_reagents]}\n\nAlternatively, you may have set the wrong extraction kit.\n\nThe program will populate lists using existing reagents.\n\nPlease make sure you check the lots carefully!",
-                status="Warning")
+                msg=f"The excel sheet you are importing is missing some reagents expected by the kit.\n\nIt looks like you are missing: {[item.role.upper() for item in missing_reagents]}\n\nAlternatively, you may have set the wrong extraction kit.\n\nThe program will populate lists using existing reagents.\n\nPlease make sure you check the lots carefully!",
+                status="Warning")
         report.add_result(result)
-        return output_reagents, report
+        return output_reagents, report, missing_reagents
+
+    def check_reagent_expiries(self, exempt: List[PydReagent] = []):
+        report = Report()
+        expired = []
+        for reagent in self.reagents:
+            if reagent not in exempt:
+                role_expiry = ReagentRole.query(name=reagent.role).eol_ext
+                try:
+                    dt = datetime.combine(reagent.expiry, datetime.min.time())
+                except TypeError:
+                    continue
+                if datetime.now() > dt + role_expiry:
+                    expired.append(f"{reagent.role}, {reagent.lot}: {reagent.expiry} + {role_expiry.days}")
+        if expired:
+            output = '\n'.join(expired)
+            result = Result(status="Warning",
+                            msg=f"The following reagents are expired:\n\n{output}"
+                            )
+            report.add_result(result)
+        return report

     def export_csv(self, filename: Path | str):
         try:
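
Note: both signatures above keep exempt: List[PydReagent] = [] as a mutable default, which Python evaluates once at definition time and then shares across all calls; anything appended to it leaks into later calls. A hedged sketch of the safer idiom:

    from typing import List, Optional

    def check_reagent_expiries(self, exempt: Optional[List["PydReagent"]] = None):
        # Fresh list per call instead of one shared default object.
        exempt = exempt if exempt is not None else []
        ...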