diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6ae2769..710623a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,7 @@
+# 202412.04
+
+- Updated wastewater submissions to allow for duplex PCR primers.
+
## 202412.03
- Automated truncating of object names longer than 64 chars going into _auditlog
diff --git a/src/submissions/__init__.py b/src/submissions/__init__.py
index ddb5d95..ec0dd1a 100644
--- a/src/submissions/__init__.py
+++ b/src/submissions/__init__.py
@@ -4,12 +4,11 @@ from pathlib import Path
from datetime import date
import calendar
-# Version of the realpython-reader package
-
year = date.today().year
month = date.today().month
day = date.today().day
+
def get_week_of_month() -> int:
"""
Gets the current week number of the month.
@@ -21,15 +20,17 @@ def get_week_of_month() -> int:
if day in week:
return ii + 1
+
# Automatically completes project info for help menu and compiling.
__project__ = "submissions"
__version__ = f"{year}{str(month).zfill(2)}.{get_week_of_month()}b"
-__author__ = {"name":"Landon Wark", "email":"Landon.Wark@phac-aspc.gc.ca"}
+__author__ = {"name": "Landon Wark", "email": "Landon.Wark@phac-aspc.gc.ca"}
__copyright__ = f"2022-{year}, Government of Canada"
__github__ = "https://github.com/landowark/submissions"
project_path = Path(__file__).parents[2].absolute()
+
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
@@ -40,26 +41,3 @@ class bcolors:
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
-
-# Hello Landon, this is your past self here. I'm trying not to screw you over like I usually do, so I will
-# set out the workflow I've imagined for creating new submission types.
-# First of all, you will need to write new parsing methods in backend.excel.parser to pull information out of the submission form
-# for the submission itself as well as for any samples you can pull out of that same workbook.
-# The workbooks no longer need a sheet map, but they do need their submission type put in the categories metadata of the client excel template.
-# Second, you will have to update the model in backend.db.models.submissions and provide a new polymorph to the BasicSubmission object.
-# The BSO should hold the majority of the general info.
-# You can also update any of the parsers to pull out any custom info you need, like enforcing RSL plate numbers, scraping PCR results, etc.
-
-# Landon, this is your slightly less past self here. For the most part, Past Landon has not screwed us. I've been able to add in the
-# Wastewater Artic with minimal difficulties, except that the parser of the non-standard, user-generated excel sheets required slightly
-# more work.
-
-# Landon, this is your even more slightly less past self here. I've overhauled a lot of stuff to make things more flexible, so you should
-# hopefully be even less screwed than before... at least with regards to parsers. The addition of kits and such is another story. Putting that
-# On the todo list.
-
-'''
-Landon, this is 2023-11-07 Landon here in a comment string no less. Really all you should have to do now to add in new experiments is create a new
-BasicSubmission derivative with associated SubbmissionType, BasicSample (and maybe SubmissionSampleAssociation if you're feeling lucky), oh, also,
-kits, reagenttypes, reagents... This is sounding less and less impressive as I type it.
-'''
\ No newline at end of file
diff --git a/src/submissions/__main__.py b/src/submissions/__main__.py
index 9dde891..6a153f0 100644
--- a/src/submissions/__main__.py
+++ b/src/submissions/__main__.py
@@ -8,6 +8,7 @@ if check_if_app():
# setup custom logger
logger = setup_logger(verbosity=3)
+# from backend.scripts import modules
from backend import scripts
from PyQt6.QtWidgets import QApplication
from frontend.widgets.app import App
@@ -22,11 +23,12 @@ def run_startup():
for script in startup_scripts:
try:
func = getattr(scripts, script)
+ # func = modules[script]
except AttributeError as e:
logger.error(f"Couldn't run startup script {script} due to {e}")
continue
logger.info(f"Running startup script: {func.__name__}")
- func(ctx)
+ func.script(ctx)
def run_teardown():
@@ -38,11 +40,12 @@ def run_teardown():
for script in teardown_scripts:
try:
func = getattr(scripts, script)
+ # func = modules[script]
except AttributeError as e:
logger.error(f"Couldn't run teardown script {script} due to {e}")
continue
logger.info(f"Running teardown script: {func.__name__}")
- func(ctx)
+ func.script(ctx)
if __name__ == '__main__':
run_startup()
diff --git a/src/submissions/backend/db/models/kits.py b/src/submissions/backend/db/models/kits.py
index b01c490..e7942a7 100644
--- a/src/submissions/backend/db/models/kits.py
+++ b/src/submissions/backend/db/models/kits.py
@@ -2,12 +2,12 @@
All kit and reagent related models
"""
from __future__ import annotations
-import datetime, json, zipfile, yaml, logging, re
+import json, zipfile, yaml, logging, re
from pprint import pformat
from sqlalchemy import Column, String, TIMESTAMP, JSON, INTEGER, ForeignKey, Interval, Table, FLOAT, BLOB
from sqlalchemy.orm import relationship, validates, Query
from sqlalchemy.ext.associationproxy import association_proxy
-from datetime import date, datetime
+from datetime import date, datetime, timedelta
from tools import check_authorization, setup_lookup, Report, Result, check_regex_match, yaml_regex_creator, timezone
from typing import List, Literal, Generator, Any
from pandas import ExcelFile
@@ -259,6 +259,79 @@ class KitType(BaseClass):
base_dict['equipment_roles'].append(v)
return base_dict
+ @classmethod
+ def import_from_yml(cls, submission_type:str|SubmissionType, filepath: Path | str | None = None, import_dict: dict | None = None) -> KitType:
+ if isinstance(submission_type, str):
+ submission_type = SubmissionType.query(name=submission_type)
+ if filepath:
+ yaml.add_constructor("!regex", yaml_regex_creator)
+ if isinstance(filepath, str):
+ filepath = Path(filepath)
+ if not filepath.exists():
+ logging.critical(f"Given file could not be found.")
+ return None
+ with open(filepath, "r") as f:
+ if filepath.suffix == ".json":
+ import_dict = json.load(fp=f)
+ elif filepath.suffix == ".yml":
+ import_dict = yaml.load(stream=f, Loader=yaml.Loader)
+ else:
+ raise Exception(f"Filetype {filepath.suffix} not supported.")
+ new_kit = KitType.query(name=import_dict['kit_type']['name'])
+ if not new_kit:
+ new_kit = KitType(name=import_dict['kit_type']['name'])
+ for role in import_dict['kit_type']['reagent_roles']:
+ new_role = ReagentRole.query(name=role['role'])
+ if new_role:
+ check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
+ if check.lower() == "n":
+ new_role = None
+ else:
+ pass
+ if not new_role:
+ eol = timedelta(role['extension_of_life'])
+ new_role = ReagentRole(name=role['role'], eol_ext=eol)
+ uses = dict(expiry=role['expiry'], lot=role['lot'], name=role['name'], sheet=role['sheet'])
+ ktrr_assoc = KitTypeReagentRoleAssociation(kit_type=new_kit, reagent_role=new_role, uses=uses)
+ ktrr_assoc.submission_type = submission_type
+ ktrr_assoc.required = role['required']
+ ktst_assoc = SubmissionTypeKitTypeAssociation(
+ kit_type=new_kit,
+ submission_type=submission_type,
+ mutable_cost_sample=import_dict['mutable_cost_sample'],
+ mutable_cost_column=import_dict['mutable_cost_column'],
+ constant_cost=import_dict['constant_cost']
+ )
+ for role in import_dict['kit_type']['equipment_roles']:
+ new_role = EquipmentRole.query(name=role['role'])
+ if new_role:
+ check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
+ if check.lower() == "n":
+ new_role = None
+ else:
+ pass
+ if not new_role:
+ new_role = EquipmentRole(name=role['role'])
+ for equipment in Equipment.assign_equipment(equipment_role=new_role):
+ new_role.instances.append(equipment)
+ ster_assoc = SubmissionTypeEquipmentRoleAssociation(submission_type=submission_type,
+ equipment_role=new_role)
+ try:
+ uses = dict(name=role['name'], process=role['process'], sheet=role['sheet'],
+ static=role['static'])
+ except KeyError:
+ uses = None
+ ster_assoc.uses = uses
+ for process in role['processes']:
+ new_process = Process.query(name=process)
+ if not new_process:
+ new_process = Process(name=process)
+ new_process.submission_types.append(submission_type)
+ new_process.kit_types.append(new_kit)
+ new_process.equipment_roles.append(new_role)
+ return new_kit
+
+
class ReagentRole(BaseClass):
"""
@@ -903,58 +976,7 @@ class SubmissionType(BaseClass):
submission_type.sample_map = import_dict['samples']
submission_type.defaults = import_dict['defaults']
for kit in import_dict['kits']:
- new_kit = KitType.query(name=kit['kit_type']['name'])
- if not new_kit:
- new_kit = KitType(name=kit['kit_type']['name'])
- for role in kit['kit_type']['reagent roles']:
- new_role = ReagentRole.query(name=role['role'])
- if new_role:
- check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
- if check.lower() == "n":
- new_role = None
- else:
- pass
- if not new_role:
- eol = datetime.timedelta(role['extension_of_life'])
- new_role = ReagentRole(name=role['role'], eol_ext=eol)
- uses = dict(expiry=role['expiry'], lot=role['lot'], name=role['name'], sheet=role['sheet'])
- ktrr_assoc = KitTypeReagentRoleAssociation(kit_type=new_kit, reagent_role=new_role, uses=uses)
- ktrr_assoc.submission_type = submission_type
- ktrr_assoc.required = role['required']
- ktst_assoc = SubmissionTypeKitTypeAssociation(
- kit_type=new_kit,
- submission_type=submission_type,
- mutable_cost_sample=kit['mutable_cost_sample'],
- mutable_cost_column=kit['mutable_cost_column'],
- constant_cost=kit['constant_cost']
- )
- for role in kit['kit_type']['equipment roles']:
- new_role = EquipmentRole.query(name=role['role'])
- if new_role:
- check = input(f"Found existing role: {new_role.name}. Use this? [Y/n]: ")
- if check.lower() == "n":
- new_role = None
- else:
- pass
- if not new_role:
- new_role = EquipmentRole(name=role['role'])
- for equipment in Equipment.assign_equipment(equipment_role=new_role):
- new_role.instances.append(equipment)
- ster_assoc = SubmissionTypeEquipmentRoleAssociation(submission_type=submission_type,
- equipment_role=new_role)
- try:
- uses = dict(name=role['name'], process=role['process'], sheet=role['sheet'],
- static=role['static'])
- except KeyError:
- uses = None
- ster_assoc.uses = uses
- for process in role['processes']:
- new_process = Process.query(name=process)
- if not new_process:
- new_process = Process(name=process)
- new_process.submission_types.append(submission_type)
- new_process.kit_types.append(new_kit)
- new_process.equipment_roles.append(new_role)
+ new_kit = KitType.import_from_yml(submission_type=submission_type, import_dict=kit)
if 'orgs' in import_dict.keys():
logger.info("Found Organizations to be imported.")
Organization.import_from_yml(filepath=filepath)
@@ -1321,7 +1343,8 @@ class Equipment(BaseClass, LogMixin):
else:
return {k: v for k, v in self.__dict__.items()}
- def get_processes(self, submission_type: str | SubmissionType | None = None, extraction_kit: str | KitType | None = None) -> List[str]:
+ def get_processes(self, submission_type: str | SubmissionType | None = None,
+ extraction_kit: str | KitType | None = None) -> List[str]:
"""
Get all processes associated with this Equipment for a given SubmissionType
@@ -1399,7 +1422,7 @@ class Equipment(BaseClass, LogMixin):
from backend.validators.pydant import PydEquipment
processes = self.get_processes(submission_type=submission_type, extraction_kit=extraction_kit)
return PydEquipment(processes=processes, role=role,
- **self.to_dict(processes=False))
+ **self.to_dict(processes=False))
@classmethod
def get_regex(cls) -> re.Pattern:
@@ -1547,9 +1570,9 @@ class EquipmentRole(BaseClass):
extraction_kit = KitType.query(name=extraction_kit)
for process in self.processes:
if submission_type and submission_type not in process.submission_types:
- continue
+ continue
if extraction_kit and extraction_kit not in process.kit_types:
- continue
+ continue
yield process.name
def to_export_dict(self, submission_type: SubmissionType, kit_type: KitType):
@@ -1597,7 +1620,6 @@ class SubmissionEquipmentAssociation(BaseClass):
Returns:
dict: This SubmissionEquipmentAssociation as a dictionary
"""
- # TODO: Currently this will only fetch a single process, even if multiple are selectable.
try:
process = self.process.name
except AttributeError:
@@ -1606,7 +1628,13 @@ class SubmissionEquipmentAssociation(BaseClass):
processes=[process], role=self.role, nickname=self.equipment.nickname)
return output
- def to_pydantic(self):
+ def to_pydantic(self) -> "PydEquipment":
+ """
+ Returns a pydantic model based on this object.
+
+ Returns:
+ PydEquipment: pydantic equipment model
+ """
from backend.validators import PydEquipment
return PydEquipment(**self.to_sub_dict())
diff --git a/src/submissions/backend/db/models/submissions.py b/src/submissions/backend/db/models/submissions.py
index 8841441..9132375 100644
--- a/src/submissions/backend/db/models/submissions.py
+++ b/src/submissions/backend/db/models/submissions.py
@@ -2,6 +2,8 @@
Models for the main submission and sample types.
"""
from __future__ import annotations
+
+from collections import OrderedDict
from copy import deepcopy
from getpass import getuser
import logging, uuid, tempfile, re, base64, numpy as np, pandas as pd, types, sys
@@ -559,7 +561,7 @@ class BasicSubmission(BaseClass, LogMixin):
except AttributeError as e:
logger.error(f"Could not set {self} attribute {key} to {value} due to \n{e}")
- def update_subsampassoc(self, sample: BasicSample, input_dict: dict):
+ def update_subsampassoc(self, sample: BasicSample, input_dict: dict) -> SubmissionSampleAssociation:
"""
Update a joined submission sample association.
@@ -568,7 +570,7 @@ class BasicSubmission(BaseClass, LogMixin):
input_dict (dict): values to be updated
Returns:
- Result: _description_
+ SubmissionSampleAssociation: Updated association
"""
try:
assoc = next(item for item in self.submission_sample_associations if item.sample == sample)
@@ -583,14 +585,14 @@ class BasicSubmission(BaseClass, LogMixin):
# NOTE: for some reason I don't think assoc.__setattr__(k, v) works here.
except AttributeError:
logger.error(f"Can't set {k} to {v}")
- result = assoc.save()
- return result
+ return assoc
def update_reagentassoc(self, reagent: Reagent, role: str):
from backend.db import SubmissionReagentAssociation
# NOTE: get the first reagent assoc that fills the given role.
try:
- assoc = next(item for item in self.submission_reagent_associations if item.reagent and role in [role.name for role in item.reagent.role])
+ assoc = next(item for item in self.submission_reagent_associations if
+ item.reagent and role in [role.name for role in item.reagent.role])
assoc.reagent = reagent
except StopIteration as e:
logger.error(f"Association for {role} not found, creating new association.")
@@ -611,7 +613,8 @@ class BasicSubmission(BaseClass, LogMixin):
missing = value in ['', 'None', None]
match key:
case "reagents":
- field_value = [item.to_pydantic(extraction_kit=self.extraction_kit) for item in self.submission_reagent_associations]
+ field_value = [item.to_pydantic(extraction_kit=self.extraction_kit) for item in
+ self.submission_reagent_associations]
case "samples":
field_value = [item.to_pydantic() for item in self.submission_sample_associations]
case "equipment":
@@ -643,7 +646,8 @@ class BasicSubmission(BaseClass, LogMixin):
continue
new_dict[key] = field_value
new_dict['filepath'] = Path(tempfile.TemporaryFile().name)
- return PydSubmission(**new_dict)
+ dicto.update(new_dict)
+ return PydSubmission(**dicto)
def save(self, original: bool = True):
"""
@@ -1006,7 +1010,7 @@ class BasicSubmission(BaseClass, LogMixin):
@setup_lookup
def query(cls,
submission_type: str | SubmissionType | None = None,
- submission_type_name: str|None = None,
+ submission_type_name: str | None = None,
id: int | str | None = None,
rsl_plate_num: str | None = None,
start_date: date | str | int | None = None,
@@ -1287,7 +1291,7 @@ class BasicSubmission(BaseClass, LogMixin):
writer = pyd.to_writer()
writer.xl.save(filename=fname.with_suffix(".xlsx"))
- def get_turnaround_time(self) -> Tuple[int|None, bool|None]:
+ def get_turnaround_time(self) -> Tuple[int | None, bool | None]:
try:
completed = self.completed_date.date()
except AttributeError:
@@ -1295,7 +1299,8 @@ class BasicSubmission(BaseClass, LogMixin):
return self.calculate_turnaround(start_date=self.submitted_date.date(), end_date=completed)
@classmethod
- def calculate_turnaround(cls, start_date:date|None=None, end_date:date|None=None) -> Tuple[int|None, bool|None]:
+ def calculate_turnaround(cls, start_date: date | None = None, end_date: date | None = None) -> Tuple[
+ int | None, bool | None]:
if 'pytest' not in sys.modules:
from tools import ctx
else:
@@ -1499,7 +1504,7 @@ class Wastewater(BasicSubmission):
output = []
for sample in samples:
# NOTE: remove '-{target}' from controls
- sample['sample'] = re.sub('-N\\d$', '', sample['sample'])
+ sample['sample'] = re.sub('-N\\d*$', '', sample['sample'])
# NOTE: if sample is already in output skip
if sample['sample'] in [item['sample'] for item in output]:
logger.warning(f"Already have {sample['sample']}")
@@ -1509,7 +1514,7 @@ class Wastewater(BasicSubmission):
# NOTE: Set assessment
sample[f"{sample['target'].lower()}_status"] = sample['assessment']
# NOTE: Get sample having other target
- other_targets = [s for s in samples if re.sub('-N\\d$', '', s['sample']) == sample['sample']]
+ other_targets = [s for s in samples if re.sub('-N\\d*$', '', s['sample']) == sample['sample']]
for s in other_targets:
sample[f"ct_{s['target'].lower()}"] = s['ct'] if isinstance(s['ct'], float) else 0.0
sample[f"{s['target'].lower()}_status"] = s['assessment']
@@ -1613,7 +1618,9 @@ class Wastewater(BasicSubmission):
sample_dict = next(item for item in pcr_samples if item['sample'] == sample.rsl_number)
except StopIteration:
continue
- self.update_subsampassoc(sample=sample, input_dict=sample_dict)
+ assoc = self.update_subsampassoc(sample=sample, input_dict=sample_dict)
+ result = assoc.save()
+ report.add_result(result)
controltype = ControlType.query(name="PCR Control")
submitted_date = datetime.strptime(" ".join(parser.pcr['run_start_date/time'].split(" ")[:-1]),
"%Y-%m-%d %I:%M:%S %p")
@@ -1623,6 +1630,27 @@ class Wastewater(BasicSubmission):
new_control.controltype = controltype
new_control.submission = self
new_control.save()
+ return report
+
+ def update_subsampassoc(self, sample: BasicSample, input_dict: dict):
+ """
+ Updates a joined submission sample association by assigning ct values to n1 or n2 based on alphabetical sorting.
+
+ Args:
+ sample (BasicSample): Associated sample.
+ input_dict (dict): values to be updated
+
+ Returns:
+ SubmissionSampleAssociation: Updated association
+ """
+ assoc = super().update_subsampassoc(sample=sample, input_dict=input_dict)
+ targets = {k: input_dict[k] for k in sorted(input_dict.keys()) if k.startswith("ct_")}
+ assert 0 < len(targets) <= 2
+ for i, v in enumerate(targets.values(), start=1):
+ update_key = f"ct_n{i}"
+ if getattr(assoc, update_key) is None:
+ setattr(assoc, update_key, v)
+ return assoc
class WastewaterArtic(BasicSubmission):
@@ -1661,7 +1689,7 @@ class WastewaterArtic(BasicSubmission):
else:
output['artic_technician'] = self.artic_technician
output['gel_info'] = self.gel_info
- output['gel_image_path'] = self.gel_image
+ output['gel_image'] = self.gel_image
output['dna_core_submission_number'] = self.dna_core_submission_number
output['source_plates'] = self.source_plates
output['artic_date'] = self.artic_date or self.submitted_date
@@ -1988,7 +2016,6 @@ class WastewaterArtic(BasicSubmission):
egel_section = custom_fields['egel_info']
# NOTE: print json field gel results to Egel results
worksheet = input_excel[egel_section['sheet']]
- # TODO: Move all this into a seperate function?
start_row = egel_section['start_row'] - 1
start_column = egel_section['start_column'] - 3
for row, ki in enumerate(info['gel_info']['value'], start=1):
@@ -2003,10 +2030,10 @@ class WastewaterArtic(BasicSubmission):
logger.error(f"Failed {kj['name']} with value {kj['value']} to row {row}, column {column}")
else:
logger.warning("No gel info found.")
- if check_key_or_attr(key='gel_image_path', interest=info, check_none=True):
+ if check_key_or_attr(key='gel_image', interest=info, check_none=True):
worksheet = input_excel[egel_section['sheet']]
with ZipFile(cls.__directory_path__.joinpath("submission_imgs.zip")) as zipped:
- z = zipped.extract(info['gel_image_path']['value'], Path(TemporaryDirectory().name))
+ z = zipped.extract(info['gel_image']['value'], Path(TemporaryDirectory().name))
img = OpenpyxlImage(z)
img.height = 400 # insert image height in pixels as float or int (e.g. 305.5)
img.width = 600
@@ -2041,9 +2068,9 @@ class WastewaterArtic(BasicSubmission):
headers = [item['name'] for item in base_dict['gel_info'][0]['values']]
base_dict['headers'] = [''] * (4 - len(headers))
base_dict['headers'] += headers
- if check_key_or_attr(key='gel_image_path', interest=base_dict, check_none=True):
+ if check_key_or_attr(key='gel_image', interest=base_dict, check_none=True):
with ZipFile(cls.__directory_path__.joinpath("submission_imgs.zip")) as zipped:
- base_dict['gel_image'] = base64.b64encode(zipped.read(base_dict['gel_image_path'])).decode('utf-8')
+ base_dict['gel_image_actual'] = base64.b64encode(zipped.read(base_dict['gel_image'])).decode('utf-8')
return base_dict, template
def custom_context_events(self) -> dict:
diff --git a/src/submissions/backend/scripts/__init__.py b/src/submissions/backend/scripts/__init__.py
index a6a3d8e..da58fa1 100644
--- a/src/submissions/backend/scripts/__init__.py
+++ b/src/submissions/backend/scripts/__init__.py
@@ -1,7 +1,8 @@
-from .irida import import_irida
+from pathlib import Path
+import importlib
-def hello(ctx):
- print("\n\nHello! Welcome to Robotics Submission Tracker.\n\n")
-
-def goodbye(ctx):
- print("\n\nGoodbye. Thank you for using Robotics Submission Tracker.\n\n")
+p = Path(__file__).parent.absolute()
+subs = [item.stem for item in p.glob("*.py") if "__" not in item.stem]
+modules = {}
+for sub in subs:
+ importlib.import_module(f"backend.scripts.{sub}")
diff --git a/src/submissions/backend/scripts/backup_database.py b/src/submissions/backend/scripts/backup_database.py
new file mode 100644
index 0000000..f81fe74
--- /dev/null
+++ b/src/submissions/backend/scripts/backup_database.py
@@ -0,0 +1,45 @@
+"""
+Script that backs up the database to the backup directory. Currently only SQLite is fully handled; PostgreSQL and MSSQL branches are stubbed (they log a warning and do not produce a backup).
+"""
+import logging, shutil
+from datetime import date
+from pathlib import Path
+from tools import Settings
+import pyodbc
+
+logger = logging.getLogger(f"submissions.{__name__}")
+
+
+def script(ctx: Settings):
+ """
+ Copies the database into the backup directory the first time it is opened every month.
+ """
+ month = date.today().strftime("%Y-%m")
+ current_month_bak = Path(ctx.backup_path).joinpath(f"submissions_backup-{month}").resolve()
+ logger.info(f"Here is the db directory: {ctx.database_path}")
+ logger.info(f"Here is the backup directory: {ctx.backup_path}")
+ match ctx.database_schema:
+ case "sqlite":
+ db_path = ctx.database_path.joinpath(ctx.database_name).with_suffix(".db")
+ current_month_bak = current_month_bak.with_suffix(".db")
+ if not current_month_bak.exists() and "Archives" not in db_path.__str__():
+ logger.info("No backup found for this month, backing up database.")
+ try:
+ shutil.copyfile(db_path, current_month_bak)
+ except PermissionError as e:
+ logger.error(f"Couldn't backup database due to: {e}")
+ case "postgresql+psycopg2":
+ logger.warning(f"Backup function not yet implemented for psql")
+ current_month_bak = current_month_bak.with_suffix(".psql")
+ case "mssql+pyodbc":
+ logger.warning(f"{ctx.database_schema} backup is currently experiencing permission issues")
+ current_month_bak = current_month_bak.with_suffix(".bak")
+ return
+ if not current_month_bak.exists():
+ logger.info(f"No backup found for this month, backing up database to {current_month_bak}.")
+ connection = pyodbc.connect(driver='{ODBC Driver 18 for SQL Server}',
+ server=f'{ctx.database_path}', database=f'{ctx.database_name}',
+ trusted_connection='yes', trustservercertificate="yes", autocommit=True)
+ backup = f"BACKUP DATABASE [{ctx.database_name}] TO DISK = N'{current_month_bak}'"
+ cursor = connection.cursor().execute(backup)
+ connection.close()
diff --git a/src/submissions/backend/scripts/goodbye.py b/src/submissions/backend/scripts/goodbye.py
new file mode 100644
index 0000000..67b829a
--- /dev/null
+++ b/src/submissions/backend/scripts/goodbye.py
@@ -0,0 +1,5 @@
+"""
+Test script for teardown_scripts
+"""
+def script(ctx):
+ print("\n\nGoodbye. Thank you for using Robotics Submission Tracker.\n\n")
diff --git a/src/submissions/backend/scripts/hello.py b/src/submissions/backend/scripts/hello.py
new file mode 100644
index 0000000..b2c660e
--- /dev/null
+++ b/src/submissions/backend/scripts/hello.py
@@ -0,0 +1,5 @@
+"""
+Test script for startup_scripts
+"""
+def script(ctx):
+ print("\n\nHello! Welcome to Robotics Submission Tracker.\n\n")
diff --git a/src/submissions/backend/scripts/irida.py b/src/submissions/backend/scripts/import_irida.py
similarity index 98%
rename from src/submissions/backend/scripts/irida.py
rename to src/submissions/backend/scripts/import_irida.py
index 78d9224..9e71f4e 100644
--- a/src/submissions/backend/scripts/irida.py
+++ b/src/submissions/backend/scripts/import_irida.py
@@ -7,7 +7,8 @@ from backend.db import IridaControl, ControlType
logger = logging.getLogger(f"submissions.{__name__}")
-def import_irida(ctx:Settings):
+
+def script(ctx:Settings):
"""
Grabs Irida controls from secondary database.
diff --git a/src/submissions/backend/validators/pydant.py b/src/submissions/backend/validators/pydant.py
index 69bf71f..093d1f0 100644
--- a/src/submissions/backend/validators/pydant.py
+++ b/src/submissions/backend/validators/pydant.py
@@ -502,7 +502,6 @@ class PydSubmission(BaseModel, extra='allow'):
dlg = ObjectSelector(title="Missing Submitting Lab",
message="We need a submitting lab. Please select from the list.",
obj_type=Organization)
-
if dlg.exec():
value['value'] = dlg.parse_form()
else:
diff --git a/src/submissions/frontend/widgets/app.py b/src/submissions/frontend/widgets/app.py
index d552b11..4d2005a 100644
--- a/src/submissions/frontend/widgets/app.py
+++ b/src/submissions/frontend/widgets/app.py
@@ -60,7 +60,6 @@ class App(QMainWindow):
self._connectActions()
self.show()
self.statusBar().showMessage('Ready', 5000)
- self.backup_database()
def _createMenuBar(self):
"""
@@ -169,28 +168,6 @@ class App(QMainWindow):
dlg = SearchBox(self, object_type=BasicSample, extras=[])
dlg.exec()
- def backup_database(self):
- """
- Copies the database into the backup directory the first time it is opened every month.
- """
- month = date.today().strftime("%Y-%m")
- current_month_bak = Path(self.ctx.backup_path).joinpath(f"submissions_backup-{month}").resolve()
- logger.info(f"Here is the db directory: {self.ctx.database_path}")
- logger.info(f"Here is the backup directory: {self.ctx.backup_path}")
- match self.ctx.database_schema:
- case "sqlite":
- db_path = self.ctx.database_path.joinpath(self.ctx.database_name).with_suffix(".db")
- current_month_bak = current_month_bak.with_suffix(".db")
- if not current_month_bak.exists() and "Archives" not in db_path.__str__():
- logger.info("No backup found for this month, backing up database.")
- try:
- shutil.copyfile(db_path, current_month_bak)
- except PermissionError as e:
- logger.error(f"Couldn't backup database due to: {e}")
- case "postgresql+psycopg2":
- logger.warning(f"Backup function not yet implemented for psql")
- current_month_bak = current_month_bak.with_suffix(".psql")
-
def export_ST_yaml(self):
"""
Copies submission type yaml to file system for editing and remport
diff --git a/src/submissions/frontend/widgets/controls_chart.py b/src/submissions/frontend/widgets/controls_chart.py
index e7562d1..0aa6045 100644
--- a/src/submissions/frontend/widgets/controls_chart.py
+++ b/src/submissions/frontend/widgets/controls_chart.py
@@ -83,7 +83,6 @@ class ControlsViewer(InfoPane):
@report_result
def chart_maker_function(self, *args, **kwargs):
- # TODO: Generalize this by moving as much code as possible to IridaControl
"""
Create html chart for controls reporting
@@ -94,7 +93,7 @@ class ControlsViewer(InfoPane):
Tuple[QMainWindow, dict]: Collection of new main app window and result dict
"""
report = Report()
- # NOTE: set the mode_sub_type for kraken
+ # NOTE: set the mode_sub_type for kraken. Disabled in PCRControl
if self.mode_sub_typer.currentText() == "":
self.mode_sub_type = None
else:
diff --git a/src/submissions/templates/wastewaterartic_details.html b/src/submissions/templates/wastewaterartic_details.html
index 019e1e7..52e5c2f 100644
--- a/src/submissions/templates/wastewaterartic_details.html
+++ b/src/submissions/templates/wastewaterartic_details.html
@@ -12,9 +12,9 @@
{% if sub['gel_info'] %}