Various bug fixes and streamlining.

This commit is contained in:
lwark
2024-10-03 15:09:41 -05:00
parent acab9d0f4c
commit c5470b9062
22 changed files with 222 additions and 380 deletions

View File

@@ -27,7 +27,7 @@ from openpyxl.drawing.image import Image as OpenpyxlImage
from tools import row_map, setup_lookup, jinja_template_loading, rreplace, row_keys, check_key_or_attr, Result, Report, \
report_result
from datetime import datetime, date
from typing import List, Any, Tuple, Literal
from typing import List, Any, Tuple, Literal, Generator
from dateutil.parser import parse
from pathlib import Path
from jinja2.exceptions import TemplateNotFound
@@ -592,8 +592,8 @@ class BasicSubmission(BaseClass):
case "ctx" | "csv" | "filepath" | "equipment":
return
case item if item in self.jsons():
match value:
case dict():
match key:
case "custom":
existing = value
case _:
# logger.debug(f"Setting JSON attribute.")
@@ -611,10 +611,7 @@ class BasicSubmission(BaseClass):
existing += value
else:
if value is not None:
if key == "custom":
existing = value
else:
existing.append(value)
existing.append(value)
self.__setattr__(key, existing)
flag_modified(self, key)
return
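A minimal sketch (assumed simplification) of the reworked JSON-attribute flow above: matching on the key means "custom" replaces the stored value wholesale, while other keys keep appending.

def set_json_attr(obj, key, value, existing):
    # "custom" overwrites; anything else accumulates non-None entries.
    match key:
        case "custom":
            existing = value
        case _:
            if value is not None:
                existing.append(value)
    setattr(obj, key, existing)

class Holder: ...
h = Holder()
set_json_attr(h, "comments", {"text": "hi"}, existing=[])
set_json_attr(h, "custom", {"a": 1}, existing=[])
print(h.comments, h.custom)   # [{'text': 'hi'}] {'a': 1}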
@@ -889,19 +886,6 @@ class BasicSubmission(BaseClass):
ws.cell(row=item['row'], column=item['column'], value=item['value'])
return input_excel
@classmethod
def custom_docx_writer(cls, input_dict: dict, tpl_obj=None):
"""
Adds custom fields to docx template writer for exported details.
Args:
input_dict (dict): Incoming default dictionary.
tpl_obj (_type_, optional): Template object. Defaults to None.
Returns:
dict: Dictionary with information added.
"""
return input_dict
@classmethod
def enforce_name(cls, instr: str, data: dict | None = {}) -> str:
@@ -962,7 +946,7 @@ class BasicSubmission(BaseClass):
return re.sub(rf"{data['abbreviation']}(\d)", rf"{data['abbreviation']}-\1", outstr)
@classmethod
def parse_pcr(cls, xl: Workbook, rsl_plate_num: str) -> list:
def parse_pcr(cls, xl: Workbook, rsl_plate_num: str) -> Generator[dict, None, None]:
"""
Perform parsing of PCR info. Since most of our PCR outputs are the same format, this should work for most.
@@ -977,7 +961,7 @@ class BasicSubmission(BaseClass):
pcr_sample_map = cls.get_submission_type().sample_map['pcr_samples']
# logger.debug(f'sample map: {pcr_sample_map}')
main_sheet = xl[pcr_sample_map['main_sheet']]
samples = []
# samples = []
fields = {k: v for k, v in pcr_sample_map.items() if k not in ['main_sheet', 'start_row']}
for row in main_sheet.iter_rows(min_row=pcr_sample_map['start_row']):
idx = row[0].row
@@ -985,8 +969,9 @@ class BasicSubmission(BaseClass):
for k, v in fields.items():
sheet = xl[v['sheet']]
sample[k] = sheet.cell(row=idx, column=v['column']).value
samples.append(sample)
return samples
yield sample
# samples.append(sample)
# return samples
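parse_pcr now yields one sample dict per spreadsheet row instead of accumulating a list. A stand-alone sketch of the same pattern, with an assumed (not the project's real) field map:

from typing import Generator
from openpyxl import Workbook

def iter_pcr_rows(ws, start_row: int, fields: dict) -> Generator[dict, None, None]:
    # Yield a dict per row; callers that still need a list can wrap this in list(...).
    for row in ws.iter_rows(min_row=start_row):
        idx = row[0].row
        yield {k: ws.cell(row=idx, column=col).value for k, col in fields.items()}

wb = Workbook()
ws = wb.active
ws.append(["sample", "ct"])
ws.append(["sample-1", 21.4])
print(list(iter_pcr_rows(ws, start_row=2, fields={"sample": 1, "ct": 2})))   # [{'sample': 'sample-1', 'ct': 21.4}]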
@classmethod
def filename_template(cls) -> str:
@@ -1533,17 +1518,17 @@ class Wastewater(BasicSubmission):
return input_dict
@classmethod
def parse_pcr(cls, xl: Workbook, rsl_plate_num: str) -> List[dict]:
def parse_pcr(cls, xl: Workbook, rsl_plate_num: str) -> Generator[dict, None, None]:
"""
Parse specific to wastewater samples.
"""
samples = super().parse_pcr(xl=xl, rsl_plate_num=rsl_plate_num)
samples = [item for item in super().parse_pcr(xl=xl, rsl_plate_num=rsl_plate_num)]
# logger.debug(f'Samples from parent pcr parser: {pformat(samples)}')
output = []
for sample in samples:
# NOTE: remove '-{target}' from controls
sample['sample'] = re.sub('-N\\d$', '', sample['sample'])
# NOTE: if sample is already in output skip
# # NOTE: if sample is already in output skip
if sample['sample'] in [item['sample'] for item in output]:
logger.warning(f"Already have {sample['sample']}")
continue
@@ -1564,8 +1549,10 @@ class Wastewater(BasicSubmission):
del sample['assessment']
except KeyError:
pass
# yield sample
output.append(sample)
return output
for sample in output:
yield sample
@classmethod
def enforce_name(cls, instr: str, data: dict | None = {}) -> str:
@@ -1677,49 +1664,18 @@ class Wastewater(BasicSubmission):
return report
parser = PCRParser(filepath=fname)
self.set_attribute("pcr_info", parser.pcr)
pcr_samples = [sample for sample in parser.samples]
self.save(original=False)
# logger.debug(f"Got {len(parser.samples)} samples to update!")
# logger.debug(f"Parser samples: {parser.samples}")
for sample in self.samples:
# logger.debug(f"Running update on: {sample}")
try:
sample_dict = next(item for item in parser.samples if item['sample'] == sample.rsl_number)
sample_dict = next(item for item in pcr_samples if item['sample'] == sample.rsl_number)
except StopIteration:
continue
self.update_subsampassoc(sample=sample, input_dict=sample_dict)
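Materializing pcr_samples up front matters because parse_pcr is now a generator: repeated next(...) searches over the same generator run dry after the first pass, whereas a list can be searched once per sample. A quick stand-alone illustration:

gen = (n for n in range(3))
print(next(item for item in gen if item == 1))                  # 1 (consumes 0 and 1)
print(next((item for item in gen if item == 0), "exhausted"))   # 'exhausted' -- 0 is already gone
lst = list(range(3))
print(next(item for item in lst if item == 0))                  # 0 -- a list survives repeated searches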
@classmethod
def custom_docx_writer(cls, input_dict: dict, tpl_obj=None) -> dict:
"""
Adds custom fields to docx template writer for exported details. Extends parent.
Args:
input_dict (dict): Incoming default dictionary.
tpl_obj (_type_, optional): Template object. Defaults to None.
Returns:
dict: Dictionary with information added.
"""
from backend.excel.writer import DocxWriter
input_dict = super().custom_docx_writer(input_dict)
well_24 = []
input_dict['samples'] = [item for item in input_dict['samples']]
samples_copy = deepcopy(input_dict['samples'])
for sample in sorted(samples_copy, key=itemgetter('column', 'row')):
try:
row = sample['source_row']
except KeyError:
continue
try:
column = sample['source_column']
except KeyError:
continue
copy = dict(submitter_id=sample['submitter_id'], row=row, column=column)
well_24.append(copy)
input_dict['origin_plate'] = [item for item in
DocxWriter.create_plate_map(sample_list=well_24, rows=4, columns=6)]
return input_dict
class WastewaterArtic(BasicSubmission):
"""
@@ -2038,11 +1994,17 @@ class WastewaterArtic(BasicSubmission):
"""
input_dict = super().custom_validation(pyd)
# logger.debug(f"Incoming input_dict: {pformat(input_dict)}")
exclude_plates = [None, "", "none", "na"]
pyd.source_plates = [plate for plate in pyd.source_plates if plate['plate'].lower() not in exclude_plates]
for sample in pyd.samples:
# logger.debug(f"Sample: {sample}")
if re.search(r"^NTC", sample.submitter_id):
sample.submitter_id = f"{sample.submitter_id}-WWG-{pyd.rsl_plate_num}"
# input_dict['csv'] = xl["hitpicks_csv_to_export"]
if isinstance(pyd.rsl_plate_num, dict):
placeholder = pyd.rsl_plate_num['value']
else:
placeholder = pyd.rsl_plate_num
sample.submitter_id = f"{sample.submitter_id}-WWG-{placeholder}"
# logger.debug(f"sample id: {sample.submitter_id}")
return input_dict
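custom_validation now drops placeholder source-plate rows and tolerates rsl_plate_num arriving as either a plain string or a {'value': ...} dict. A compact illustration of the filter, with assumed plate values:

exclude_plates = [None, "", "none", "na"]
source_plates = [{"plate": "PLT-0001"}, {"plate": ""}, {"plate": "NA"}]
kept = [p for p in source_plates if p["plate"].lower() not in exclude_plates]
print(kept)   # [{'plate': 'PLT-0001'}]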
@classmethod
@@ -2075,6 +2037,7 @@ class WastewaterArtic(BasicSubmission):
for iii, plate in enumerate(info['source_plates']['value']):
# logger.debug(f"Plate: {plate}")
row = start_row + iii
logger.debug(f"Writing {plate} to row {iii}")
try:
worksheet.cell(row=row, column=source_plates_section['plate_column'], value=plate['plate'])
except TypeError:
@@ -2209,30 +2172,6 @@ class WastewaterArtic(BasicSubmission):
zipf.write(img_path, self.gel_image)
self.save()
@classmethod
def custom_docx_writer(cls, input_dict: dict, tpl_obj=None) -> dict:
"""
Adds custom fields to docx template writer for exported details.
Args:
input_dict (dict): Incoming default dictionary.
tpl_obj (_type_, optional): Template object. Defaults to None.
Returns:
dict: Dictionary with information added.
"""
input_dict = super().custom_docx_writer(input_dict)
# NOTE: if there's a gel image, extract it.
if check_key_or_attr(key='gel_image_path', interest=input_dict, check_none=True):
with ZipFile(cls.__directory_path__.joinpath("submission_imgs.zip")) as zipped:
img = zipped.read(input_dict['gel_image_path'])
with tempfile.TemporaryFile(mode="wb", suffix=".jpg", delete=False) as tmp:
tmp.write(img)
# logger.debug(f"Tempfile: {tmp.name}")
img = InlineImage(tpl_obj, image_descriptor=tmp.name, width=Inches(5.5)) #, width=5.5)#, height=400)
input_dict['gel_image'] = img
return input_dict
# Sample Classes
@@ -2493,6 +2432,8 @@ class BasicSample(BaseClass):
model = cls.find_polymorphic_subclass(polymorphic_identity=sample_type)
case BasicSample():
model = sample_type
case None:
model = cls
case _:
model = cls.find_polymorphic_subclass(attrs=kwargs)
# logger.debug(f"Length of kwargs: {len(kwargs)}")
@@ -2514,7 +2455,7 @@ class BasicSample(BaseClass):
raise AttributeError(f"Delete not implemented for {self.__class__}")
@classmethod
def get_searchables(cls):
def get_searchables(cls) -> List[dict]:
"""
Delivers a list of fields that can be used in fuzzy search.

View File

@@ -1,13 +1,15 @@
'''
Contains functions for generating summary reports
'''
from PyQt6.QtCore import QMarginsF
from PyQt6.QtGui import QPageLayout, QPageSize
from pandas import DataFrame, ExcelWriter
import logging, re
from pathlib import Path
from datetime import date, timedelta
from typing import List, Tuple, Any
from backend.db.models import BasicSubmission
from tools import jinja_template_loading, html_to_pdf, get_first_blank_df_row, \
from tools import jinja_template_loading, get_first_blank_df_row, \
row_map
from PyQt6.QtWidgets import QWidget
from openpyxl.worksheet.worksheet import Worksheet
@@ -99,11 +101,15 @@ class ReportMaker(object):
filename = Path(filename)
filename = filename.absolute()
# NOTE: html_to_pdf doesn't function without a PyQt6 app
if isinstance(obj, QWidget):
logger.info(f"We're in PyQt environment, writing PDF to: {filename}")
html_to_pdf(html=self.html, output_file=filename)
else:
logger.info("Not in PyQt. Skipping PDF writing.")
# if isinstance(obj, QWidget):
# logger.info(f"We're in PyQt environment, writing PDF to: {filename}")
# page_layout = QPageLayout()
# page_layout.setPageSize(QPageSize(QPageSize.PageSizeId.A4))
# page_layout.setOrientation(QPageLayout.Orientation.Portrait)
# page_layout.setMargins(QMarginsF(25, 25, 25, 25))
# self.webview.page().printToPdf(fname.with_suffix(".pdf").__str__(), page_layout)
# else:
# logger.info("Not in PyQt. Skipping PDF writing.")
# logger.debug("Finished writing.")
self.writer = ExcelWriter(filename.with_suffix(".xlsx"), engine='openpyxl')
self.summary_df.to_excel(self.writer, sheet_name="Report")
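With the PyQt PDF path commented out, the report only lands in Excel. A minimal sketch of the remaining pandas flow, with an assumed filename and data:

from pandas import DataFrame, ExcelWriter

summary_df = DataFrame({"Submission": ["RSL-WW-0001"], "Run cost": [100.0]})
with ExcelWriter("report.xlsx", engine="openpyxl") as writer:
    summary_df.to_excel(writer, sheet_name="Report")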

View File

@@ -3,9 +3,6 @@ contains writer objects for pushing values to submission sheet templates.
"""
import logging
from copy import copy
from operator import itemgetter
from pathlib import Path
# from pathlib import Path
from pprint import pformat
from typing import List, Generator
from openpyxl import load_workbook, Workbook
@@ -13,9 +10,6 @@ from backend.db.models import SubmissionType, KitType, BasicSubmission
from backend.validators.pydant import PydSubmission
from io import BytesIO
from collections import OrderedDict
from tools import jinja_template_loading
from docxtpl import DocxTemplate
from docx import Document
logger = logging.getLogger(f"submissions.{__name__}")
@@ -147,7 +141,6 @@ class InfoWriter(object):
Returns:
dict: merged dictionary
"""
# output = {}
for k, v in info_dict.items():
if v is None:
continue
@@ -163,8 +156,6 @@ class InfoWriter(object):
if len(dicto) > 0:
# output[k] = dicto
yield k, dicto
# logger.debug(f"Reconciled info: {pformat(output)}")
# return output
def write_info(self) -> Workbook:
"""
@@ -217,7 +208,6 @@ class ReagentWriter(object):
if isinstance(extraction_kit, str):
kit_type = KitType.query(name=extraction_kit)
reagent_map = {k: v for k, v in kit_type.construct_xl_map_for_use(submission_type)}
# self.reagents = {k: v for k, v in self.reconcile_map(reagent_list=reagent_list, reagent_map=reagent_map)}
self.reagents = self.reconcile_map(reagent_list=reagent_list, reagent_map=reagent_map)
def reconcile_map(self, reagent_list: List[dict], reagent_map: dict) -> Generator[dict, None, None]:
@@ -231,7 +221,6 @@ class ReagentWriter(object):
Returns:
List[dict]: merged dictionary
"""
# output = []
for reagent in reagent_list:
try:
mp_info = reagent_map[reagent['role']]
@@ -246,9 +235,7 @@ class ReagentWriter(object):
dicto = v
placeholder[k] = dicto
placeholder['sheet'] = mp_info['sheet']
# output.append(placeholder)
yield placeholder
# return output
def write_reagents(self) -> Workbook:
"""
@@ -285,7 +272,6 @@ class SampleWriter(object):
self.submission_type = submission_type
self.xl = xl
self.sample_map = submission_type.construct_sample_map()['lookup_table']
# self.samples = self.reconcile_map(sample_list)
# NOTE: exclude any samples without a submission rank.
samples = [item for item in self.reconcile_map(sample_list) if item['submission_rank'] > 0]
self.samples = sorted(samples, key=lambda k: k['submission_rank'])
@@ -300,7 +286,6 @@ class SampleWriter(object):
Returns:
List[dict]: List of merged dictionaries
"""
# output = []
multiples = ['row', 'column', 'assoc_id', 'submission_rank']
for sample in sample_list:
# logger.debug(f"Writing sample: {sample}")
@@ -311,7 +296,6 @@ class SampleWriter(object):
continue
new[k] = v
yield new
# return sorted(output, key=lambda k: k['submission_rank'])
def write_samples(self) -> Workbook:
"""
@@ -325,6 +309,11 @@ class SampleWriter(object):
for sample in self.samples:
row = self.sample_map['start_row'] + (sample['submission_rank'] - 1)
for k, v in sample.items():
if isinstance(v, dict):
try:
v = v['value']
except KeyError:
logger.error(f"Cant convert {v} to single string.")
try:
column = columns[k]
except KeyError:
@@ -363,7 +352,6 @@ class EquipmentWriter(object):
Returns:
List[dict]: List of merged dictionaries
"""
# output = []
if equipment_list is None:
return
for ii, equipment in enumerate(equipment_list, start=1):
@@ -388,10 +376,7 @@ class EquipmentWriter(object):
placeholder['sheet'] = mp_info['sheet']
except KeyError:
placeholder['sheet'] = "Equipment"
# logger.debug(f"Final output of {equipment['role']} : {placeholder}")
yield placeholder
# output.append(placeholder)
# return output
def write_equipment(self) -> Workbook:
"""
@@ -452,19 +437,19 @@ class TipWriter(object):
Returns:
List[dict]: List of merged dictionaries
"""
# output = []
if tips_list is None:
return
for ii, tips in enumerate(tips_list, start=1):
mp_info = tips_map[tips['role']]
# mp_info = tips_map[tips['role']]
mp_info = tips_map[tips.role]
# logger.debug(f"{tips['role']} map: {mp_info}")
placeholder = copy(tips)
placeholder = {}
if mp_info == {}:
for jj, (k, v) in enumerate(tips.items(), start=1):
for jj, (k, v) in enumerate(tips.__dict__.items(), start=1):
dicto = dict(value=v, row=ii, column=jj)
placeholder[k] = dicto
else:
for jj, (k, v) in enumerate(tips.items(), start=1):
for jj, (k, v) in enumerate(tips.__dict__.items(), start=1):
try:
dicto = dict(value=v, row=mp_info[k]['row'], column=mp_info[k]['column'])
except KeyError as e:
@@ -477,8 +462,6 @@ class TipWriter(object):
placeholder['sheet'] = "Tips"
# logger.debug(f"Final output of {tips['role']} : {placeholder}")
yield placeholder
# output.append(placeholder)
# return output
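Switching from tips['role'] and tips.items() to tips.role and tips.__dict__.items() reflects that the tips entries are now objects (PydTips models) rather than plain dicts. A minimal stand-in illustration; the real field names may differ:

class FakeTips:   # hypothetical stand-in for PydTips
    def __init__(self, role: str, lot: str):
        self.role = role
        self.lot = lot

tips = FakeTips(role="extraction", lot="T-001")
print(tips.role)                                    # attribute access replaces tips['role']
for jj, (k, v) in enumerate(tips.__dict__.items(), start=1):
    print(jj, k, v)                                 # mirrors how the placeholder dict is built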
def write_tips(self) -> Workbook:
"""
@@ -507,72 +490,3 @@ class TipWriter(object):
logger.error(f"Couldn't write to {tips['sheet']}, row: {v['row']}, column: {v['column']}")
logger.error(e)
return self.xl
class DocxWriter(object):
"""
Object to render
"""
def __init__(self, base_dict: dict):
"""
Args:
base_dict (dict): dictionary of info to be written to template.
"""
logger.debug(f"Incoming base dict: {pformat(base_dict)}")
self.sub_obj = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=base_dict['submission_type'])
env = jinja_template_loading()
temp_name = f"{base_dict['submission_type'].replace(' ', '').lower()}_subdocument.docx"
path = Path(env.loader.__getattribute__("searchpath")[0])
main_template = path.joinpath("basicsubmission_document.docx")
subdocument = path.joinpath(temp_name)
if subdocument.exists():
main_template = self.create_merged_template(main_template, subdocument)
self.template = DocxTemplate(main_template)
base_dict['platemap'] = [item for item in self.create_plate_map(base_dict['samples'], rows=8, columns=12)]
# logger.debug(pformat(base_dict['platemap']))
try:
base_dict['excluded'] += ["platemap"]
except KeyError:
base_dict['excluded'] = ["platemap"]
base_dict = self.sub_obj.custom_docx_writer(base_dict, tpl_obj=self.template)
# logger.debug(f"Base dict: {pformat(base_dict)}")
self.template.render({"sub": base_dict})
@classmethod
def create_plate_map(self, sample_list: List[dict], rows: int = 0, columns: int = 0) -> List[list]:
sample_list = sorted(sample_list, key=itemgetter('column', 'row'))
# NOTE: if rows or columns is default, set to maximum value in sample list
if rows == 0:
rows = max([sample['row'] for sample in sample_list])
if columns == 0:
columns = max([sample['column'] for sample in sample_list])
for row in range(0, rows):
# NOTE: Create a list with length equal to columns length, padding with '' where necessary
contents = [next((item['submitter_id'] for item in sample_list if item['row'] == row + 1 and
item['column'] == column + 1), '') for column in range(0, columns)]
yield contents
def create_merged_template(self, *args) -> BytesIO:
"""
Appends submission specific information
Returns:
BytesIO: Merged docx template
"""
merged_document = Document()
output = BytesIO()
for index, file in enumerate(args):
sub_doc = Document(file)
# Don't add a page break if you've reached the last file.
# if index < len(args) - 1:
# sub_doc.add_page_break()
for element in sub_doc.element.body:
merged_document.element.body.append(element)
merged_document.save(output)
return output
def save(self, filename: Path | str):
if isinstance(filename, str):
filename = Path(filename)
self.template.save(filename)

View File

@@ -26,7 +26,7 @@ class RSLNamer(object):
if self.submission_type is None:
# logger.debug("Creating submission type because none exists")
self.submission_type = self.retrieve_submission_type(filename=filename)
logger.debug(f"got submission type: {self.submission_type}")
logger.info(f"got submission type: {self.submission_type}")
if self.submission_type is not None:
# logger.debug("Retrieving BasicSubmission subclass")
self.sub_object = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=self.submission_type)
@@ -48,36 +48,41 @@ class RSLNamer(object):
Returns:
str: parsed submission type
"""
def st_from_path(filename:Path) -> str:
logger.debug(f"Using path method for {filename}.")
if filename.exists():
wb = load_workbook(filename)
try:
# NOTE: Gets first category in the metadata.
submission_type = next(item.strip().title() for item in wb.properties.category.split(";"))
except (StopIteration, AttributeError):
sts = {item.name: item.get_template_file_sheets() for item in SubmissionType.query()}
try:
submission_type = next(k.title() for k,v in sts.items() if wb.sheetnames==v)
except StopIteration:
# NOTE: On failure recurse using filename as string for string method
submission_type = cls.retrieve_submission_type(filename=filename.stem.__str__())
else:
submission_type = cls.retrieve_submission_type(filename=filename.stem.__str__())
return submission_type
def st_from_str(filename:str) -> str:
regex = BasicSubmission.construct_regex()
logger.debug(f"Using string method for {filename}.")
logger.debug(f"Using regex: {regex}")
m = regex.search(filename)
print(m)
try:
submission_type = m.lastgroup
logger.debug(f"Got submission type: {submission_type}")
except AttributeError as e:
submission_type = None
logger.critical(f"No submission type found or submission type found!: {e}")
return submission_type
match filename:
case Path():
logger.debug(f"Using path method for {filename}.")
if filename.exists():
wb = load_workbook(filename)
try:
submission_type = [item.strip().title() for item in wb.properties.category.split(";")][0]
except AttributeError:
try:
sts = {item.name: item.get_template_file_sheets() for item in SubmissionType.query()}
for k, v in sts.items():
# This gets the *first* submission type that matches the sheet names in the workbook
if wb.sheetnames == v:
submission_type = k.title()
break
except:
# On failure recurse using filename as string for string method
submission_type = cls.retrieve_submission_type(filename=filename.stem.__str__())
else:
submission_type = cls.retrieve_submission_type(filename=filename.stem.__str__())
submission_type = st_from_path(filename=filename)
case str():
regex = BasicSubmission.construct_regex()
logger.debug(f"Using string method for {filename}.")
logger.debug(f"Using regex: {regex}")
m = regex.search(filename)
try:
submission_type = m.lastgroup
logger.debug(f"Got submission type: {submission_type}")
except AttributeError as e:
logger.critical(f"No submission type found or submission type found!: {e}")
submission_type = st_from_str(filename=filename)
case _:
submission_type = None
try:
@@ -93,6 +98,7 @@ class RSLNamer(object):
message="Please select submission type from list below.", obj_type=SubmissionType)
if dlg.exec():
submission_type = dlg.parse_form()
print(submission_type)
submission_type = submission_type.replace("_", " ")
return submission_type
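st_from_path now grabs the first workbook category with next(...) and treats StopIteration/AttributeError as "no usable metadata", falling back to sheet-name matching. A quick stand-alone check of that extraction, with an assumed category string:

category = "wastewater; diagnostic"
try:
    submission_type = next(item.strip().title() for item in category.split(";"))
except (StopIteration, AttributeError):
    submission_type = None
print(submission_type)   # Wastewater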

View File

@@ -9,7 +9,6 @@ from datetime import date, datetime, timedelta
from dateutil.parser import parse
from dateutil.parser import ParserError
from typing import List, Tuple, Literal
from types import GeneratorType
from . import RSLNamer
from pathlib import Path
from tools import check_not_nan, convert_nans_to_nones, Report, Result
@@ -49,7 +48,6 @@ class PydReagent(BaseModel):
def rescue_type_with_lookup(cls, value, values):
if value is None and values.data['lot'] is not None:
try:
# return lookup_reagents(ctx=values.data['ctx'], lot_number=values.data['lot']).name
return Reagent.query(lot_number=values.data['lot'].name)
except AttributeError:
return value
@@ -222,7 +220,8 @@ class PydSample(BaseModel, extra='allow'):
fields = list(self.model_fields.keys()) + list(self.model_extra.keys())
return {k: getattr(self, k) for k in fields}
def toSQL(self, submission: BasicSubmission | str = None) -> Tuple[BasicSample, Result]:
def toSQL(self, submission: BasicSubmission | str = None) -> Tuple[
BasicSample, List[SubmissionSampleAssociation], Result | None]:
"""
Converts this instance into a backend.db.models.submissions.Sample object
@@ -238,6 +237,7 @@ class PydSample(BaseModel, extra='allow'):
instance = BasicSample.query_or_create(sample_type=self.sample_type, submitter_id=self.submitter_id)
for key, value in self.__dict__.items():
match key:
# NOTE: row, column go in the association
case "row" | "column":
continue
case _:
@@ -259,7 +259,6 @@ class PydSample(BaseModel, extra='allow'):
**self.model_extra)
# logger.debug(f"Using submission_sample_association: {association}")
try:
# instance.sample_submission_associations.append(association)
out_associations.append(association)
except IntegrityError as e:
logger.error(f"Could not attach submission sample association due to: {e}")
@@ -316,10 +315,10 @@ class PydEquipment(BaseModel, extra='ignore'):
def make_empty_list(cls, value):
# logger.debug(f"Pydantic value: {value}")
value = convert_nans_to_nones(value)
if value is None:
value = ['']
if len(value) == 0:
if not value:
value = ['']
# if len(value) == 0:
# value = ['']
try:
value = [item.strip() for item in value]
except AttributeError:
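Folding the None check and the length check into a single "if not value" covers both falsy cases. A quick check:

for value in (None, [], ["tip 1 "]):
    if not value:
        value = ['']
    print([item.strip() for item in value])   # [''], [''], ['tip 1']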
@@ -337,7 +336,7 @@ class PydEquipment(BaseModel, extra='ignore'):
Tuple[Equipment, SubmissionEquipmentAssociation]: SQL objects
"""
if isinstance(submission, str):
logger.info(f"Got string, querying {submission}")
# logger.debug(f"Got string, querying {submission}")
submission = BasicSubmission.query(rsl_number=submission)
equipment = Equipment.query(asset_number=self.asset_number)
if equipment is None:
@@ -347,7 +346,7 @@ class PydEquipment(BaseModel, extra='ignore'):
# NOTE: Need to make sure the same association is not added to the submission
try:
assoc = SubmissionEquipmentAssociation.query(equipment_id=equipment.id, submission_id=submission.id,
role=self.role, limit=1)
role=self.role, limit=1)
except TypeError as e:
logger.error(f"Couldn't get association due to {e}, returning...")
return equipment, None
@@ -400,7 +399,7 @@ class PydSubmission(BaseModel, extra='allow'):
equipment: List[PydEquipment] | None = []
cost_centre: dict | None = Field(default=dict(value=None, missing=True), validate_default=True)
contact: dict | None = Field(default=dict(value=None, missing=True), validate_default=True)
tips: List[PydTips] | None =[]
tips: List[PydTips] | None = []
@field_validator("tips", mode="before")
@classmethod
@@ -409,7 +408,7 @@ class PydSubmission(BaseModel, extra='allow'):
if isinstance(value, dict):
value = value['value']
if isinstance(value, Generator):
logger.debug("We have a generator")
# logger.debug("We have a generator")
return [PydTips(**tips) for tips in value]
if not value:
return []
@@ -466,7 +465,7 @@ class PydSubmission(BaseModel, extra='allow'):
return dict(value=datetime.fromordinal(datetime(1900, 1, 1).toordinal() + value['value'] - 2).date(),
missing=True)
case str():
string = re.sub(r"(_|-)\d$", "", value['value'])
string = re.sub(r"(_|-)\d(R\d)?$", "", value['value'])
try:
output = dict(value=parse(string).date(), missing=True)
except ParserError as e:
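The widened pattern also strips a rerun suffix such as "-1R2" before date parsing, not just a single trailing digit. A quick check with assumed plate names:

import re
for name in ("RSL-WW-20241003-1", "RSL-WW-20241003-1R2"):
    print(re.sub(r"(_|-)\d(R\d)?$", "", name))   # both print RSL-WW-20241003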
@@ -568,6 +567,7 @@ class PydSubmission(BaseModel, extra='allow'):
else:
raise ValueError(f"No extraction kit found.")
if value is None:
# NOTE: Kit selection is done in the parser, so should not be necessary here.
return dict(value=None, missing=True)
return value
@@ -575,7 +575,7 @@ class PydSubmission(BaseModel, extra='allow'):
@classmethod
def make_submission_type(cls, value, values):
if not isinstance(value, dict):
value = {"value": value}
value = dict(value=value)
if check_not_nan(value['value']):
value = value['value'].title()
return dict(value=value, missing=False)
@@ -593,6 +593,8 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator("submission_category")
@classmethod
def rescue_category(cls, value, values):
if isinstance(value['value'], str):
value['value'] = value['value'].title()
if value['value'] not in ["Research", "Diagnostic", "Surveillance", "Validation"]:
value['value'] = values.data['submission_type']['value']
return value
@@ -600,18 +602,16 @@ class PydSubmission(BaseModel, extra='allow'):
@field_validator("reagents", mode="before")
@classmethod
def expand_reagents(cls, value):
# print(f"\n{type(value)}\n")
if isinstance(value, Generator):
logger.debug("We have a generator")
# logger.debug("We have a generator")
return [PydReagent(**reagent) for reagent in value]
return value
@field_validator("samples", mode="before")
@classmethod
def expand_samples(cls, value):
# print(f"\n{type(value)}\n")
if isinstance(value, Generator):
logger.debug("We have a generator")
# logger.debug("We have a generator")
return [PydSample(**sample) for sample in value]
return value
@@ -619,11 +619,10 @@ class PydSubmission(BaseModel, extra='allow'):
@classmethod
def assign_ids(cls, value):
starting_id = SubmissionSampleAssociation.autoincrement_id()
output = []
for iii, sample in enumerate(value, start=starting_id):
# NOTE: Why is this a list? Answer: to zip with the lists of rows and columns in case of multiple of the same sample.
sample.assoc_id = [iii]
output.append(sample)
return output
return value
@field_validator("cost_centre", mode="before")
@classmethod
@@ -672,7 +671,7 @@ class PydSubmission(BaseModel, extra='allow'):
else:
return value
def __init__(self, run_custom:bool=False, **data):
def __init__(self, run_custom: bool = False, **data):
super().__init__(**data)
# NOTE: this could also be done with default_factory
logger.debug(data)
@@ -682,7 +681,6 @@ class PydSubmission(BaseModel, extra='allow'):
if run_custom:
self.submission_object.custom_validation(pyd=self)
def set_attribute(self, key: str, value):
"""
Better handling of attribute setting.
@@ -742,7 +740,7 @@ class PydSubmission(BaseModel, extra='allow'):
output = {k: self.filter_field(k) for k in fields}
return output
def filter_field(self, key:str):
def filter_field(self, key: str):
item = getattr(self, key)
# logger.debug(f"Attempting deconstruction of {key}: {item} with type {type(item)}")
match item:
@@ -796,8 +794,6 @@ class PydSubmission(BaseModel, extra='allow'):
continue
# logger.debug(f"Setting {key} to {value}")
match key:
# case "custom":
# instance.custom = value
case "reagents":
if report.results[0].code == 1:
instance.submission_reagent_associations = []
@@ -833,7 +829,6 @@ class PydSubmission(BaseModel, extra='allow'):
except AttributeError:
continue
if association is not None and association not in instance.submission_tips_associations:
# association.save()
instance.submission_tips_associations.append(association)
case item if item in instance.jsons():
# logger.debug(f"{item} is a json.")
@@ -877,16 +872,9 @@ class PydSubmission(BaseModel, extra='allow'):
instance.run_cost = instance.run_cost - sum(discounts)
except Exception as e:
logger.error(f"An unknown exception occurred when calculating discounts: {e}")
# We need to make sure there's a proper rsl plate number
# logger.debug(f"We've got a total cost of {instance.run_cost}")
# try:
# logger.debug(f"Constructed instance: {instance}")
# except AttributeError as e:
# logger.debug(f"Something went wrong constructing instance {self.rsl_plate_num}: {e}")
# logger.debug(f"Constructed submissions message: {msg}")
return instance, report
def to_form(self, parent: QWidget, disable:list|None=None):
def to_form(self, parent: QWidget, disable: list | None = None):
"""
Converts this instance into a frontend.widgets.submission_widget.SubmissionFormWidget
@@ -1014,7 +1002,6 @@ class PydOrganization(BaseModel):
value = [item.to_sql() for item in getattr(self, field)]
case _:
value = getattr(self, field)
# instance.set_attribute(name=field, value=value)
instance.__setattr__(name=field, value=value)
return instance