Conversion of some functions to generators.
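The pattern applied throughout the diff below: methods that previously accumulated a list or dict and returned it now yield their items one at a time, and callers that still need a concrete collection rebuild it with a comprehension. A minimal sketch of that conversion follows; the names (construct_map_list, construct_map_gen, assoc.name, assoc.uses) are placeholders for illustration, not code from this repository.

    from typing import Generator, Tuple

    # Before: build the whole mapping in memory, then return it.
    def construct_map_list(assocs) -> dict:
        output = {}
        for assoc in assocs:
            output[assoc.name] = assoc.uses
        return output

    # After: yield (key, value) pairs as they are produced.
    def construct_map_gen(assocs) -> Generator[Tuple[str, dict], None, None]:
        for assoc in assocs:
            yield assoc.name, assoc.uses

    # Callers that still need a dict rebuild it at the call site, e.g.:
    # reagent_map = {k: v for k, v in kit_type.construct_xl_map_for_use(submission_type)}

Generators keep memory flat for large workbooks and let callers stop early; the dict and list comprehensions added at the call sites preserve the old behaviour wherever a full collection is still required.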
TODO.md
@@ -1,4 +1,5 @@
-- [ ] Revamp frontend.widgets.controls_chart to include visualizations?
+- [ ] Upgrade to generators when returning lists.
+- [x] Revamp frontend.widgets.controls_chart to include visualizations?
 - [x] Convert Parsers to using openpyxl.
   - The hardest part of this is going to be the sample parsing. I'm onto using the cell formulas in the plate map to suss out the location in the lookup table, but it could get a little recursive up in here.
 - [ ] Create a default info return function.
@@ -84,7 +84,7 @@ class ControlType(BaseClass):
         Returns:
             List[ControlType]: Control types that have targets
         """
-        return [item for item in cls.query() if item.targets]# != []]
+        return [item for item in cls.query() if item.targets]

     @classmethod
     def build_positive_regex(cls) -> Pattern:
@@ -141,7 +141,9 @@ class Control(BaseClass):
         # logger.debug("calculating kraken count total to use in percentage")
         kraken_cnt_total = sum([kraken[item]['kraken_count'] for item in kraken])
         # logger.debug("Creating new kraken.")
-        new_kraken = [dict(name=item, kraken_count=kraken[item]['kraken_count'], kraken_percent="{0:.0%}".format(kraken[item]['kraken_count'] / kraken_cnt_total)) for item in kraken]
+        new_kraken = [dict(name=item, kraken_count=kraken[item]['kraken_count'],
+                           kraken_percent="{0:.0%}".format(kraken[item]['kraken_count'] / kraken_cnt_total)) for item in
+                      kraken]
         new_kraken = sorted(new_kraken, key=itemgetter('kraken_count'), reverse=True)
         # logger.debug("setting targets")
         if not self.controltype.targets:
@@ -8,7 +8,7 @@ from sqlalchemy.ext.associationproxy import association_proxy
 from datetime import date
 import logging, re
 from tools import check_authorization, setup_lookup, Report, Result
-from typing import List, Literal
+from typing import List, Literal, Generator
 from pandas import ExcelFile
 from pathlib import Path
 from . import Base, BaseClass, Organization
@@ -168,9 +168,9 @@ class KitType(BaseClass):
         return [item.reagent_role for item in relevant_associations]

     # TODO: Move to BasicSubmission?
-    def construct_xl_map_for_use(self, submission_type: str | SubmissionType) -> dict:
+    def construct_xl_map_for_use(self, submission_type: str | SubmissionType) -> Generator[str, str]:
         """
-        Creates map of locations in excel workbook for a SubmissionType
+        Creates map of locations in Excel workbook for a SubmissionType

         Args:
             submission_type (str | SubmissionType): Submissiontype.name
@@ -178,7 +178,7 @@ class KitType(BaseClass):
         Returns:
             dict: Dictionary containing information locations.
         """
-        info_map = {}
+        # info_map = {}
         # NOTE: Account for submission_type variable type.
         match submission_type:
             case str():
@@ -193,10 +193,10 @@ class KitType(BaseClass):
         # logger.debug("Get all KitTypeReagentTypeAssociation for SubmissionType")
         for assoc in assocs:
             try:
-                info_map[assoc.reagent_role.name] = assoc.uses
+                yield assoc.reagent_role.name, assoc.uses
             except TypeError:
                 continue
-        return info_map
+        # return info_map

     @classmethod
     @setup_lookup
@@ -409,6 +409,7 @@ class Reagent(BaseClass):
             rtype = reagent_role.name.replace("_", " ")
         except AttributeError:
             rtype = "Unknown"
+        # logger.debug(f"Role for {self.name}: {rtype}")
         # NOTE: Calculate expiry with EOL from ReagentType
         try:
             place_holder = self.expiry + reagent_role.eol_ext
@@ -611,7 +612,8 @@ class SubmissionType(BaseClass):
     ) #: Association of equipmentroles

     equipment = association_proxy("submissiontype_equipmentrole_associations", "equipment_role",
-                                  creator=lambda eq: SubmissionTypeEquipmentRoleAssociation(equipment_role=eq)) #: Proxy of equipmentrole associations
+                                  creator=lambda eq: SubmissionTypeEquipmentRoleAssociation(
+                                      equipment_role=eq)) #: Proxy of equipmentrole associations

     submissiontype_kit_rt_associations = relationship(
         "KitTypeReagentRoleAssociation",
@@ -665,7 +667,7 @@ class SubmissionType(BaseClass):

     def construct_info_map(self, mode: Literal['read', 'write']) -> dict:
         """
-        Make of map of where all fields are located in excel sheet
+        Make of map of where all fields are located in Excel sheet

         Args:
             mode (Literal["read", "write"]): Which mode to get locations for
@@ -673,15 +675,16 @@ class SubmissionType(BaseClass):
         Returns:
             dict: Map of locations
         """
-        info = {k:v for k,v in self.info_map.items() if k != "custom"}
+        info = {k: v for k, v in self.info_map.items() if k != "custom"}
         logger.debug(f"Info map: {info}")
-        output = {}
         match mode:
             case "read":
                 output = {k: v[mode] for k, v in info.items() if v[mode]}
             case "write":
                 output = {k: v[mode] + v['read'] for k, v in info.items() if v[mode] or v['read']}
                 output = {k: v for k, v in output.items() if all([isinstance(item, dict) for item in v])}
+            case _:
+                output = {}
         output['custom'] = self.info_map['custom']
         return output
@@ -694,36 +697,38 @@ class SubmissionType(BaseClass):
         """
         return self.sample_map

-    def construct_equipment_map(self) -> dict:
+    def construct_equipment_map(self) -> Generator[str, dict]:
         """
         Constructs map of equipment to excel cells.

         Returns:
             dict: Map equipment locations in excel sheet
         """
-        output = {}
+        # output = {}
         # logger.debug("Iterating through equipment roles")
         for item in self.submissiontype_equipmentrole_associations:
             emap = item.uses
             if emap is None:
                 emap = {}
-            output[item.equipment_role.name] = emap
-        return output
+            # output[item.equipment_role.name] = emap
+            yield item.equipment_role.name, emap
+        # return output

-    def construct_tips_map(self) -> dict:
+    def construct_tips_map(self) -> Generator[str, dict]:
         """
         Constructs map of tips to excel cells.

         Returns:
             dict: Tip locations in the excel sheet.
         """
-        output = {}
+        # output = {}
         for item in self.submissiontype_tiprole_associations:
             tmap = item.uses
             if tmap is None:
                 tmap = {}
-            output[item.tip_role.name] = tmap
-        return output
+            # output[item.tip_role.name] = tmap
+            yield item.tip_role.name, tmap
+        # return output

     def get_equipment(self, extraction_kit: str | KitType | None = None) -> List['PydEquipmentRole']:
         """
@@ -1280,15 +1285,16 @@ class EquipmentRole(BaseClass):
         Returns:
             dict: This EquipmentRole dict
         """
-        output = {}
-        for key, value in self.__dict__.items():
-            match key:
-                case "processes":
-                    pass
-                case _:
-                    value = value
-            output[key] = value
-        return output
+        # output = {}
+        return {key: value for key, value in self.__dict__.items() if key != "processes"}
+        # match key:
+        #     case "processes":
+        #         pass
+        #     case _:
+        #         value = value
+        # yield key, value
+        # # output[key] = value
+        # return output

     def to_pydantic(self, submission_type: SubmissionType,
                     extraction_kit: str | KitType | None = None) -> "PydEquipmentRole":
@@ -1668,7 +1674,6 @@ class SubmissionTipsAssociation(BaseClass):
                         back_populates="tips_submission_associations") #: associated equipment
     role_name = Column(String(32), primary_key=True) #, ForeignKey("_tiprole.name"))

-
     def to_sub_dict(self) -> dict:
         """
         This item as a dictionary
@@ -25,7 +25,7 @@ from openpyxl.worksheet.worksheet import Worksheet
 from openpyxl.drawing.image import Image as OpenpyxlImage
 from tools import row_map, setup_lookup, jinja_template_loading, rreplace, row_keys, check_key_or_attr, Result, Report
 from datetime import datetime, date
-from typing import List, Any, Tuple, Literal
+from typing import List, Any, Tuple, Literal, Generator
 from dateutil.parser import parse
 from pathlib import Path
 from jinja2.exceptions import TemplateNotFound
@@ -289,7 +289,7 @@ class BasicSubmission(BaseClass):
         try:
             reagents = [item.to_sub_dict(extraction_kit=self.extraction_kit) for item in
                         self.submission_reagent_associations]
-            for k in self.extraction_kit.construct_xl_map_for_use(self.submission_type):
+            for k, v in self.extraction_kit.construct_xl_map_for_use(self.submission_type):
                 if k == 'info':
                     continue
                 if not any([item['role'] == k for item in reagents]):
@@ -841,6 +841,7 @@ class BasicSubmission(BaseClass):
             for k, v in fields.items():
                 sheet = xl[v['sheet']]
                 sample[k] = sheet.cell(row=idx, column=v['column']).value
+            # yield sample
            samples.append(sample)
         return samples

@@ -1381,7 +1382,7 @@ class Wastewater(BasicSubmission):
         return input_dict

     @classmethod
-    def parse_pcr(cls, xl: Workbook, rsl_plate_num: str) -> list:
+    def parse_pcr(cls, xl: Workbook, rsl_plate_num: str) -> List[dict]:
         """
         Parse specific to wastewater samples.
         """
@@ -1393,6 +1394,7 @@ class Wastewater(BasicSubmission):
             sample['sample'] = re.sub('-N\\d$', '', sample['sample'])
             # NOTE: if sample is already in output skip
             if sample['sample'] in [item['sample'] for item in output]:
+                logger.warning(f"Already have {sample['sample']}")
                 continue
             # NOTE: Set ct values
             sample[f"ct_{sample['target'].lower()}"] = sample['ct'] if isinstance(sample['ct'], float) else 0.0
@@ -84,8 +84,9 @@ class SheetParser(object):
         if extraction_kit is None:
             extraction_kit = self.sub['extraction_kit']
         # logger.debug(f"Parsing reagents for {extraction_kit}")
-        self.sub['reagents'] = ReagentParser(xl=self.xl, submission_type=self.submission_type,
-                                             extraction_kit=extraction_kit).parse_reagents()
+        parser = ReagentParser(xl=self.xl, submission_type=self.submission_type,
+                               extraction_kit=extraction_kit)
+        self.sub['reagents'] = [item for item in parser.parse_reagents()]

     def parse_samples(self):
         """
@@ -303,21 +304,21 @@ class ReagentParser(object):

         if isinstance(submission_type, dict):
             submission_type = submission_type['value']
-        reagent_map = self.kit_object.construct_xl_map_for_use(submission_type)
+        reagent_map = {k: v for k, v in self.kit_object.construct_xl_map_for_use(submission_type)}
         try:
             del reagent_map['info']
         except KeyError:
             pass
         return reagent_map

-    def parse_reagents(self) -> List[dict]:
+    def parse_reagents(self) -> Generator[dict, None, None]:
         """
-        Extracts reagent information from the excel form.
+        Extracts reagent information from the Excel form.

         Returns:
             List[PydReagent]: List of parsed reagents.
         """
-        listo = []
+        # listo = []
         for sheet in self.xl.sheetnames:
             ws = self.xl[sheet]
             relevant = {k.strip(): v for k, v in self.map.items() if sheet in self.map[k]['sheet']}
@@ -337,9 +338,8 @@ class ReagentParser(object):
                     else:
                         comment = ""
                 except (KeyError, IndexError):
-                    listo.append(
-                        dict(role=item.strip(), lot=None, expiry=None, name=None, comment="", missing=True))
-                    continue
+                    yield dict(role=item.strip(), lot=None, expiry=None, name=None, comment="", missing=True)
+                    # continue
                 # NOTE: If the cell is blank tell the PydReagent
                 if check_not_nan(lot):
                     missing = False
@@ -355,9 +355,9 @@ class ReagentParser(object):
                     logger.warning(f"name is not a string.")
                     check = True
                 if check:
-                    listo.append(dict(role=item.strip(), lot=lot, expiry=expiry, name=name, comment=comment,
-                                      missing=missing))
-        return listo
+                    yield dict(role=item.strip(), lot=lot, expiry=expiry, name=name, comment=comment,
+                               missing=missing)
+        # return listo


 class SampleParser(object):
@@ -556,14 +556,14 @@ class EquipmentParser(object):
         self.xl = xl
         self.map = self.fetch_equipment_map()

-    def fetch_equipment_map(self) -> List[dict]:
+    def fetch_equipment_map(self) -> dict:
         """
         Gets the map of equipment locations in the submission type's spreadsheet

         Returns:
             List[dict]: List of locations
         """
-        return self.submission_type.construct_equipment_map()
+        return {k: v for k, v in self.submission_type.construct_equipment_map()}

     def get_asset_number(self, input: str) -> str:
         """
@@ -642,14 +642,14 @@ class TipParser(object):
         self.xl = xl
         self.map = self.fetch_tip_map()

-    def fetch_tip_map(self) -> List[dict]:
+    def fetch_tip_map(self) -> dict:
         """
         Gets the map of equipment locations in the submission type's spreadsheet

         Returns:
             List[dict]: List of locations
         """
-        return self.submission_type.construct_tips_map()
+        return {k:v for k,v in self.submission_type.construct_tips_map()}

     def parse_tips(self) -> List[dict]:
         """
@@ -20,22 +20,20 @@ env = jinja_template_loading()
 class ReportMaker(object):

     def __init__(self, start_date: date, end_date: date):
-        subs = BasicSubmission.query(start_date=start_date, end_date=end_date)
-        records = [item.to_dict(report=True) for item in subs]
-        self.detailed_df, self.summary_df = self.make_report_xlsx(records=records)
-        self.html = self.make_report_html(df=self.summary_df, start_date=start_date, end_date=end_date)
+        self.start_date = start_date
+        self.end_date = end_date
+        self.subs = BasicSubmission.query(start_date=start_date, end_date=end_date)
+        self.detailed_df, self.summary_df = self.make_report_xlsx()
+        self.html = self.make_report_html(df=self.summary_df)

-    def make_report_xlsx(self, records: list[dict]) -> Tuple[DataFrame, DataFrame]:
+    def make_report_xlsx(self) -> Tuple[DataFrame, DataFrame]:
         """
         create the dataframe for a report

-        Args:
-            records (list[dict]): list of dictionaries created from submissions
-
         Returns:
             DataFrame: output dataframe
         """
-        df = DataFrame.from_records(records)
+        df = DataFrame.from_records([item.to_dict(report=True) for item in self.subs])
         # NOTE: put submissions with the same lab together
         df = df.sort_values("submitting_lab")
         # NOTE: aggregate cost and sample count columns
@@ -47,7 +45,7 @@ class ReportMaker(object):
         df = df.sort_values(['submitting_lab', "submitted_date"])
         return df, df2

-    def make_report_html(self, df: DataFrame, start_date: date, end_date: date) -> str:
+    def make_report_html(self, df: DataFrame) -> str:

         """
         generates html from the report dataframe
@@ -84,7 +82,7 @@ class ReportMaker(object):
             output.append(adder)
             old_lab = lab
         # logger.debug(output)
-        dicto = {'start_date': start_date, 'end_date': end_date, 'labs': output}
+        dicto = {'start_date': self.start_date, 'end_date': self.end_date, 'labs': output}
         temp = env.get_template('summary_report.html')
         html = temp.render(input=dicto)
         return html
@@ -1,13 +1,13 @@
-'''
+"""
 contains writer objects for pushing values to submission sheet templates.
-'''
+"""
 import logging
 from copy import copy
 from operator import itemgetter
 from pathlib import Path
 # from pathlib import Path
 from pprint import pformat
-from typing import List
+from typing import List, Generator
 from openpyxl import load_workbook, Workbook
 from backend.db.models import SubmissionType, KitType, BasicSubmission
 from backend.validators.pydant import PydSubmission
@@ -47,7 +47,6 @@ class SheetWriter(object):
             else:
                 self.sub[k] = v
         # logger.debug(f"\n\nWriting to {submission.filepath.__str__()}\n\n")
-
         if self.filepath.stem.startswith("tmp"):
             template = self.submission_type.template_file
             workbook = load_workbook(BytesIO(template))
@@ -148,7 +147,7 @@ class InfoWriter(object):
         Returns:
             dict: merged dictionary
         """
-        output = {}
+        # output = {}
         for k, v in info_dict.items():
             if v is None:
                 continue
@@ -162,9 +161,10 @@ class InfoWriter(object):
                 pass
             dicto['value'] = v
             if len(dicto) > 0:
-                output[k] = dicto
+                # output[k] = dicto
+                yield k, dicto
         # logger.debug(f"Reconciled info: {pformat(output)}")
-        return output
+        # return output

     def write_info(self) -> Workbook:
         """
@@ -173,7 +173,7 @@ class InfoWriter(object):
         Returns:
             Workbook: workbook with info written.
         """
-        for k, v in self.info.items():
+        for k, v in self.info:
             # NOTE: merge all comments to fit in single cell.
             if k == "comment" and isinstance(v['value'], list):
                 json_join = [item['text'] for item in v['value'] if 'text' in item.keys()]
@@ -209,10 +209,11 @@ class ReagentWriter(object):
             submission_type = SubmissionType.query(name=submission_type)
         if isinstance(extraction_kit, str):
             kit_type = KitType.query(name=extraction_kit)
-        reagent_map = kit_type.construct_xl_map_for_use(submission_type)
+        reagent_map = {k: v for k, v in kit_type.construct_xl_map_for_use(submission_type)}
+        # self.reagents = {k: v for k, v in self.reconcile_map(reagent_list=reagent_list, reagent_map=reagent_map)}
         self.reagents = self.reconcile_map(reagent_list=reagent_list, reagent_map=reagent_map)

-    def reconcile_map(self, reagent_list: List[dict], reagent_map: dict) -> List[dict]:
+    def reconcile_map(self, reagent_list: List[dict], reagent_map: dict) -> Generator[dict, None, None]:
         """
         Merge reagents with their locations

@@ -223,7 +224,7 @@ class ReagentWriter(object):
         Returns:
             List[dict]: merged dictionary
         """
-        output = []
+        # output = []
         for reagent in reagent_list:
             try:
                 mp_info = reagent_map[reagent['role']]
@@ -238,8 +239,9 @@ class ReagentWriter(object):
                     dicto = v
                 placeholder[k] = dicto
             placeholder['sheet'] = mp_info['sheet']
-            output.append(placeholder)
-        return output
+            # output.append(placeholder)
+            yield placeholder
+        # return output

     def write_reagents(self) -> Workbook:
         """
@@ -263,6 +265,7 @@ class SampleWriter(object):
     """
     object to write sample data into excel file
     """
+
     def __init__(self, xl: Workbook, submission_type: SubmissionType | str, sample_list: list):
         """
         Args:
@@ -275,9 +278,11 @@ class SampleWriter(object):
         self.submission_type = submission_type
         self.xl = xl
         self.sample_map = submission_type.construct_sample_map()['lookup_table']
-        self.samples = self.reconcile_map(sample_list)
+        # self.samples = self.reconcile_map(sample_list)
+        samples = [item for item in self.reconcile_map(sample_list)]
+        self.samples = sorted(samples, key=lambda k: k['submission_rank'])

-    def reconcile_map(self, sample_list: list) -> List[dict]:
+    def reconcile_map(self, sample_list: list) -> Generator[dict, None, None]:
         """
         Merge sample info with locations

@@ -287,7 +292,7 @@ class SampleWriter(object):
         Returns:
             List[dict]: List of merged dictionaries
         """
-        output = []
+        # output = []
         multiples = ['row', 'column', 'assoc_id', 'submission_rank']
         for sample in sample_list:
             # logger.debug(f"Writing sample: {sample}")
@@ -297,8 +302,8 @@ class SampleWriter(object):
                 if k in multiples:
                     continue
                 new[k] = v
-            output.append(new)
-        return sorted(output, key=lambda k: k['submission_rank'])
+            yield new
+        # return sorted(output, key=lambda k: k['submission_rank'])

     def write_samples(self) -> Workbook:
         """
@@ -336,10 +341,10 @@ class EquipmentWriter(object):
             submission_type = SubmissionType.query(name=submission_type)
         self.submission_type = submission_type
         self.xl = xl
-        equipment_map = self.submission_type.construct_equipment_map()
+        equipment_map = {k: v for k, v in self.submission_type.construct_equipment_map()}
         self.equipment = self.reconcile_map(equipment_list=equipment_list, equipment_map=equipment_map)

-    def reconcile_map(self, equipment_list: list, equipment_map: dict) -> List[dict]:
+    def reconcile_map(self, equipment_list: list, equipment_map: dict) -> Generator[dict, None, None]:
         """
         Merges equipment with location data

@@ -350,9 +355,9 @@ class EquipmentWriter(object):
         Returns:
             List[dict]: List of merged dictionaries
         """
-        output = []
+        # output = []
         if equipment_list is None:
-            return output
+            return
         for ii, equipment in enumerate(equipment_list, start=1):
             mp_info = equipment_map[equipment['role']]
             # logger.debug(f"{equipment['role']} map: {mp_info}")
@@ -376,8 +381,9 @@ class EquipmentWriter(object):
             except KeyError:
                 placeholder['sheet'] = "Equipment"
             # logger.debug(f"Final output of {equipment['role']} : {placeholder}")
-            output.append(placeholder)
-        return output
+            yield placeholder
+            # output.append(placeholder)
+        # return output

     def write_equipment(self) -> Workbook:
         """
@@ -424,10 +430,10 @@ class TipWriter(object):
             submission_type = SubmissionType.query(name=submission_type)
         self.submission_type = submission_type
         self.xl = xl
-        tips_map = self.submission_type.construct_tips_map()
+        tips_map = {k: v for k, v in self.submission_type.construct_tips_map()}
         self.tips = self.reconcile_map(tips_list=tips_list, tips_map=tips_map)

-    def reconcile_map(self, tips_list: List[dict], tips_map: dict) -> List[dict]:
+    def reconcile_map(self, tips_list: List[dict], tips_map: dict) -> Generator[dict, None, None]:
         """
         Merges tips with location data

@@ -438,9 +444,9 @@ class TipWriter(object):
         Returns:
             List[dict]: List of merged dictionaries
         """
-        output = []
+        # output = []
         if tips_list is None:
-            return output
+            return
         for ii, tips in enumerate(tips_list, start=1):
             mp_info = tips_map[tips['role']]
             # logger.debug(f"{tips['role']} map: {mp_info}")
@@ -462,8 +468,9 @@ class TipWriter(object):
             except KeyError:
                 placeholder['sheet'] = "Tips"
             # logger.debug(f"Final output of {tips['role']} : {placeholder}")
-            output.append(placeholder)
-        return output
+            yield placeholder
+            # output.append(placeholder)
+        # return output

     def write_tips(self) -> Workbook:
         """
@@ -530,12 +537,12 @@ class DocxWriter(object):
             rows = max([sample['row'] for sample in sample_list])
         if columns == 0:
             columns = max([sample['column'] for sample in sample_list])
-        output = []
+        # output = []
         for row in range(0, rows):
             contents = [''] * columns
             for column in range(0, columns):
                 try:
-                    ooi = [item for item in sample_list if item['row']==row+1 and item['column']==column+1][0]
+                    ooi = [item for item in sample_list if item['row'] == row + 1 and item['column'] == column + 1][0]
                 except IndexError:
                     continue
                 contents[column] = ooi['submitter_id']
@@ -545,8 +552,9 @@ class DocxWriter(object):
                 contents += [''] * (columns - len(contents))
             if not contents:
                 contents = [''] * columns
-            output.append(contents)
-        return output
+            yield contents
+            # output.append(contents)
+        # return output

     def create_merged_template(self, *args) -> BytesIO:
         """
@@ -567,7 +575,6 @@ class DocxWriter(object):
         merged_document.save(output)
         return output

-
     def save(self, filename: Path | str):
         if isinstance(filename, str):
             filename = Path(filename)
@@ -851,6 +851,7 @@ class PydSubmission(BaseModel, extra='allow'):
         # logger.debug(f"Template rendered as: {render}")
         return render

+    @report_result
     def check_kit_integrity(self, extraction_kit: str | dict | None = None) -> Tuple[List[PydReagent], Report]:
         """
         Ensures all reagents expected in kit are listed in Submission
@@ -873,7 +874,7 @@ class PydSubmission(BaseModel, extra='allow'):
                           ext_kit.get_reagents(required=True, submission_type=self.submission_type['value'])]
         # logger.debug(f"Kit reagents: {ext_kit_rtypes}")
         # logger.debug(f"Submission reagents: {self.reagents}")
-        # Exclude any reagenttype found in this pyd not expected in kit.
+        # NOTE: Exclude any reagenttype found in this pyd not expected in kit.
         expected_check = [item.role for item in ext_kit_rtypes]
         output_reagents = [rt for rt in self.reagents if rt.role in expected_check]
         # logger.debug(f"Already have these reagent types: {output_reagents}")
@@ -882,7 +883,7 @@ class PydSubmission(BaseModel, extra='allow'):
         missing_reagents += [rt for rt in output_reagents if rt.missing]
         output_reagents += [rt for rt in missing_reagents if rt not in output_reagents]
         # logger.debug(f"Missing reagents types: {missing_reagents}")
-        # if lists are equal return no problem
+        # NOTE: if lists are equal return no problem
         if len(missing_reagents) == 0:
             result = None
         else:
@@ -1,16 +1,12 @@
 """
 Functions for constructing controls graphs using plotly.
-TODO: Move these functions to widgets.controls_charts
 """
-import re
 import plotly
 import plotly.express as px
 import pandas as pd
-from pandas import DataFrame
 from plotly.graph_objects import Figure
 import logging
-# from backend.excel import get_unique_values_in_df_column
-from tools import Settings, get_unique_values_in_df_column, divide_chunks
+from tools import get_unique_values_in_df_column, divide_chunks
 from frontend.widgets.functions import select_save_file

 logger = logging.getLogger(f"submissions.{__name__}")
@@ -18,174 +14,12 @@ logger = logging.getLogger(f"submissions.{__name__}")

 class CustomFigure(Figure):

-    def __init__(self, ctx: Settings, df: pd.DataFrame, ytitle: str | None = None):
+    def __init__(self, df: pd.DataFrame, modes: list, ytitle: str | None = None):
         super().__init__()
+        self.construct_chart(df=df, modes=modes)
+        self.generic_figure_markers(modes=modes, ytitle=ytitle)

+    def construct_chart(self, df: pd.DataFrame, modes: list):
-        # NOTE: Start here.
-def create_charts(ctx: Settings, df: pd.DataFrame, ytitle: str | None = None) -> Figure:
-    """
-    Constructs figures based on parsed pandas dataframe.
-
-    Args:
-        ctx (Settings): settings passed down from gui
-        df (pd.DataFrame): input dataframe
-        ytitle (str | None, optional): title for the y-axis. Defaults to None.
-
-    Returns:
-        Figure: Plotly figure
-    """
-    # from backend.excel import drop_reruns_from_df
-    # converts starred genera to normal and splits off list of starred
-    genera = []
-    if df.empty:
-        return None
-    for item in df['genus'].to_list():
-        try:
-            if item[-1] == "*":
-                genera.append(item[-1])
-            else:
-                genera.append("")
-        except IndexError:
-            genera.append("")
-    df['genus'] = df['genus'].replace({'\*': ''}, regex=True).replace({"NaN": "Unknown"})
-    df['genera'] = genera
-    # NOTE: remove original runs, using reruns if applicable
-    df = drop_reruns_from_df(ctx=ctx, df=df)
-    # NOTE: sort by and exclude from
-    sorts = ['submitted_date', "target", "genus"]
-    exclude = ['name', 'genera']
-    modes = [item for item in df.columns if item not in sorts and item not in exclude] # and "_hashes" not in item]
-    # NOTE: Set descending for any columns that have "{mode}" in the header.
-    ascending = [False if item == "target" else True for item in sorts]
-    df = df.sort_values(by=sorts, ascending=ascending)
-    # logger.debug(df[df.isna().any(axis=1)])
-    # NOTE: actual chart construction is done by
-    fig = construct_chart(df=df, modes=modes, ytitle=ytitle)
-    return fig
-
-
-def drop_reruns_from_df(ctx: Settings, df: DataFrame) -> DataFrame:
-    """
-    Removes semi-duplicates from dataframe after finding sequencing repeats.
-
-    Args:
-        settings (dict): settings passed from gui
-        df (DataFrame): initial dataframe
-
-    Returns:
-        DataFrame: dataframe with originals removed in favour of repeats.
-    """
-    if 'rerun_regex' in ctx:
-        sample_names = get_unique_values_in_df_column(df, column_name="name")
-        rerun_regex = re.compile(fr"{ctx.rerun_regex}")
-        for sample in sample_names:
-            if rerun_regex.search(sample):
-                first_run = re.sub(rerun_regex, "", sample)
-                df = df.drop(df[df.name == first_run].index)
-    return df
-
-
-def generic_figure_markers(fig: Figure, modes: list = [], ytitle: str | None = None) -> Figure:
-    """
-    Adds standard layout to figure.
-
-    Args:
-        fig (Figure): Input figure.
-        modes (list, optional): List of modes included in figure. Defaults to [].
-        ytitle (str, optional): Title for the y-axis. Defaults to None.
-
-    Returns:
-        Figure: Output figure with updated titles, rangeslider, buttons.
-    """
-    if modes != []:
-        ytitle = modes[0]
-    # Creating visibles list for each mode.
-    fig.update_layout(
-        xaxis_title="Submitted Date (* - Date parsed from fastq file creation date)",
-        yaxis_title=ytitle,
-        showlegend=True,
-        barmode='stack',
-        updatemenus=[
-            dict(
-                type="buttons",
-                direction="right",
-                x=0.7,
-                y=1.2,
-                showactive=True,
-                buttons=make_buttons(modes=modes, fig_len=len(fig.data)),
-            )
-        ]
-    )
-    fig.update_xaxes(
-        rangeslider_visible=True,
-        rangeselector=dict(
-            buttons=list([
-                dict(count=1, label="1m", step="month", stepmode="backward"),
-                dict(count=3, label="3m", step="month", stepmode="backward"),
-                dict(count=6, label="6m", step="month", stepmode="backward"),
-                dict(count=1, label="YTD", step="year", stepmode="todate"),
-                dict(count=1, label="1y", step="year", stepmode="backward"),
-                dict(step="all")
-            ])
-        )
-    )
-    assert type(fig) == Figure
-    return fig
-
-
-def make_buttons(modes: list, fig_len: int) -> list:
-    """
-    Creates list of buttons with one for each mode to be used in showing/hiding mode traces.
-
-    Args:
-        modes (list): list of modes used by main parser.
-        fig_len (int): number of traces in the figure
-
-    Returns:
-        list: list of buttons.
-    """
-    buttons = []
-    if len(modes) > 1:
-        for ii, mode in enumerate(modes):
-            # What I need to do is create a list of bools with the same length as the fig.data
-            mode_vis = [True] * fig_len
-            # And break it into {len(modes)} chunks
-            mode_vis = list(divide_chunks(mode_vis, len(modes)))
-            # Then, for each chunk, if the chunk index isn't equal to the index of the current mode, set to false
-            for jj, sublist in enumerate(mode_vis):
-                if jj != ii:
-                    mode_vis[jj] = [not elem for elem in mode_vis[jj]]
-            # Finally, flatten list.
-            mode_vis = [item for sublist in mode_vis for item in sublist]
-            # Now, make button to add to list
-            buttons.append(dict(label=mode, method="update", args=[
-                {"visible": mode_vis},
-                {"yaxis.title.text": mode},
-            ]
-                                ))
-    return buttons
-
-
-def output_figures(figs: list, group_name: str):
-    """
-    Writes plotly figure to html file.
-
-    Args:
-        settings (dict): settings passed down from click
-        fig (Figure): input figure object
-        group_name (str): controltype
-    """
-    output = select_save_file(None, default_name=group_name, extension="html")
-    with open(output, "w") as f:
-        for fig in figs:
-            try:
-                f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
-            except AttributeError:
-                logger.error(f"The following figure was a string: {fig}")
-
-
-def construct_chart(df: pd.DataFrame, modes: list, ytitle: str | None = None) -> Figure:
         """
         Creates a plotly chart for controls from a pandas dataframe

@@ -197,7 +31,7 @@ def construct_chart(df: pd.DataFrame, modes: list, ytitle: str | None = None) ->
         Returns:
             Figure: output stacked bar chart.
         """
-        fig = Figure()
+        # fig = Figure()
         for ii, mode in enumerate(modes):
             if "count" in mode:
                 df[mode] = pd.to_numeric(df[mode], errors='coerce')
@@ -215,7 +49,8 @@ def construct_chart(df: pd.DataFrame, modes: list, ytitle: str | None = None) ->
                     color_discrete_sequence = ['red']
                 case _:
                     color_discrete_sequence = ['blue', 'red']
-            bar = px.bar(df, x="submitted_date",
+            bar = px.bar(df,
+                         x="submitted_date",
                          y=mode,
                          color=color,
                          title=mode,
@@ -225,11 +60,104 @@ def construct_chart(df: pd.DataFrame, modes: list, ytitle: str | None = None) ->
                          color_discrete_sequence=color_discrete_sequence
                          )
            bar.update_traces(visible=ii == 0)
-            fig.add_traces(bar.data)
-    return generic_figure_markers(fig=fig, modes=modes, ytitle=ytitle)
+            self.add_traces(bar.data)
+        # return generic_figure_markers(modes=modes, ytitle=ytitle)

-def construct_html(figure: Figure) -> str:
+    def generic_figure_markers(self, modes: list = [], ytitle: str | None = None):
+        """
+        Adds standard layout to figure.
+
+        Args:
+            fig (Figure): Input figure.
+            modes (list, optional): List of modes included in figure. Defaults to [].
+            ytitle (str, optional): Title for the y-axis. Defaults to None.
+
+        Returns:
+            Figure: Output figure with updated titles, rangeslider, buttons.
+        """
+        if modes:
+            ytitle = modes[0]
+        # Creating visibles list for each mode.
+        self.update_layout(
+            xaxis_title="Submitted Date (* - Date parsed from fastq file creation date)",
+            yaxis_title=ytitle,
+            showlegend=True,
+            barmode='stack',
+            updatemenus=[
+                dict(
+                    type="buttons",
+                    direction="right",
+                    x=0.7,
+                    y=1.2,
+                    showactive=True,
+                    buttons=[button for button in self.make_buttons(modes=modes)],
+                )
+            ]
+        )
+        self.update_xaxes(
+            rangeslider_visible=True,
+            rangeselector=dict(
+                buttons=list([
+                    dict(count=1, label="1m", step="month", stepmode="backward"),
+                    dict(count=3, label="3m", step="month", stepmode="backward"),
+                    dict(count=6, label="6m", step="month", stepmode="backward"),
+                    dict(count=1, label="YTD", step="year", stepmode="todate"),
+                    dict(count=1, label="1y", step="year", stepmode="backward"),
+                    dict(step="all")
+                ])
+            )
+        )
+        assert isinstance(self, Figure)
+        # return fig
+
+    def make_buttons(self, modes: list) -> list:
+        """
+        Creates list of buttons with one for each mode to be used in showing/hiding mode traces.
+
+        Args:
+            modes (list): list of modes used by main parser.
+            fig_len (int): number of traces in the figure
+
+        Returns:
+            list: list of buttons.
+        """
+        fig_len = len(self.data)
+        if len(modes) > 1:
+            for ii, mode in enumerate(modes):
+                # What I need to do is create a list of bools with the same length as the fig.data
+                mode_vis = [True] * fig_len
+                # And break it into {len(modes)} chunks
+                mode_vis = list(divide_chunks(mode_vis, len(modes)))
+                # Then, for each chunk, if the chunk index isn't equal to the index of the current mode, set to false
+                for jj, sublist in enumerate(mode_vis):
+                    if jj != ii:
+                        mode_vis[jj] = [not elem for elem in mode_vis[jj]]
+                # Finally, flatten list.
+                mode_vis = [item for sublist in mode_vis for item in sublist]
+                # Now, yield button to add to list
+                yield dict(label=mode, method="update", args=[
+                    {"visible": mode_vis},
+                    {"yaxis.title.text": mode},
+                ])
+
+    def save_figure(self, group_name: str = "plotly_output"):
+        """
+        Writes plotly figure to html file.
+
+        Args:
+            figs ():
+            settings (dict): settings passed down from click
+            fig (Figure): input figure object
+            group_name (str): controltype
+        """
+        output = select_save_file(None, default_name=group_name, extension="html")
+        with open(output, "w") as f:
+            try:
+                f.write(self.to_html())
+            except AttributeError:
+                logger.error(f"The following figure was a string: {self}")
+
+    def to_html(self) -> str:
         """
         Creates final html code from plotly

@@ -240,8 +168,8 @@ def construct_html(figure: Figure) -> str:
         str: html string
         """
         html = '<html><body>'
-        if figure is not None:
-            html += plotly.offline.plot(figure, output_type='div',
+        if self is not None:
+            html += plotly.offline.plot(self, output_type='div',
                                         include_plotlyjs='cdn') #, image = 'png', auto_open=True, image_filename='plot_image')
         else:
             html += "<h1>No data was retrieved for the given parameters.</h1>"
@@ -1,9 +1,9 @@
|
|||||||
"""
|
"""
|
||||||
Handles display of control charts
|
Handles display of control charts
|
||||||
"""
|
"""
|
||||||
|
import re
|
||||||
from datetime import timedelta
|
from datetime import timedelta
|
||||||
from typing import Tuple
|
from typing import Tuple
|
||||||
|
|
||||||
from PyQt6.QtWebEngineWidgets import QWebEngineView
|
from PyQt6.QtWebEngineWidgets import QWebEngineView
|
||||||
from PyQt6.QtWidgets import (
|
from PyQt6.QtWidgets import (
|
||||||
QWidget, QVBoxLayout, QComboBox, QHBoxLayout,
|
QWidget, QVBoxLayout, QComboBox, QHBoxLayout,
|
||||||
@@ -14,9 +14,9 @@ from backend.db import ControlType, Control
|
|||||||
from PyQt6.QtCore import QDate, QSize
|
from PyQt6.QtCore import QDate, QSize
|
||||||
import logging
|
import logging
|
||||||
from pandas import DataFrame
|
from pandas import DataFrame
|
||||||
from tools import Report, Result
|
from tools import Report, Result, get_unique_values_in_df_column, Settings, report_result
|
||||||
# from backend.excel.reports import convert_data_list_to_df
|
# from backend.excel.reports import convert_data_list_to_df
|
||||||
from frontend.visualizations.control_charts import create_charts, construct_html
|
from frontend.visualizations.control_charts import CustomFigure
|
||||||
|
|
||||||
logger = logging.getLogger(f"submissions.{__name__}")
|
logger = logging.getLogger(f"submissions.{__name__}")
|
||||||
|
|
||||||
@@ -61,6 +61,7 @@ class ControlsViewer(QWidget):
|
|||||||
"""
|
"""
|
||||||
self.controls_getter_function()
|
self.controls_getter_function()
|
||||||
|
|
||||||
|
@report_result
|
||||||
def controls_getter_function(self):
|
def controls_getter_function(self):
|
||||||
"""
|
"""
|
||||||
Get controls based on start/end dates
|
Get controls based on start/end dates
|
||||||
@@ -103,7 +104,7 @@ class ControlsViewer(QWidget):
|
|||||||
self.sub_typer.clear()
|
self.sub_typer.clear()
|
||||||
self.sub_typer.setEnabled(False)
|
self.sub_typer.setEnabled(False)
|
||||||
self.chart_maker()
|
self.chart_maker()
|
||||||
self.report.add_result(report)
|
return report
|
||||||
|
|
||||||
def chart_maker(self):
|
def chart_maker(self):
|
||||||
"""
|
"""
|
||||||
@@ -111,6 +112,7 @@ class ControlsViewer(QWidget):
|
|||||||
"""
|
"""
|
||||||
self.chart_maker_function()
|
self.chart_maker_function()
|
||||||
|
|
||||||
|
@report_result
|
||||||
def chart_maker_function(self):
|
def chart_maker_function(self):
|
||||||
"""
|
"""
|
||||||
Create html chart for controls reporting
|
Create html chart for controls reporting
|
||||||
@@ -141,7 +143,7 @@ class ControlsViewer(QWidget):
         data = [item for sublist in data for item in sublist]
         # logger.debug(f"Control objects going into df conversion: {type(data)}")
         if not data:
-            self.report.add_result(Result(status="Critical", msg="No data found for controls in given date range."))
+            report.add_result(Result(status="Critical", msg="No data found for controls in given date range."))
             return
         # NOTE send to dataframe creator
         df = self.convert_data_list_to_df(input_df=data)
@@ -150,15 +152,16 @@ class ControlsViewer(QWidget):
         else:
             title = f"{self.mode} - {self.subtype}"
         # NOTE: send dataframe to chart maker
-        fig = create_charts(ctx=self.app.ctx, df=df, ytitle=title)
+        df, modes = self.prep_df(ctx=self.app.ctx, df=df)
+        fig = CustomFigure(df=df, ytitle=title, modes=modes)
         # logger.debug(f"Updating figure...")
         # NOTE: construct html for webview
-        html = construct_html(figure=fig)
+        html = fig.to_html()
         # logger.debug(f"The length of html code is: {len(html)}")
         self.webengineview.setHtml(html)
         self.webengineview.update()
         # logger.debug("Figure updated... I hope.")
-        self.report.add_result(report)
+        return report
 
     def convert_data_list_to_df(self, input_df: list[dict]) -> DataFrame:
         """
@@ -266,8 +269,65 @@ class ControlsViewer(QWidget):
             df, previous_dates = self.check_date(df, item, previous_dates)
         return df, previous_dates
 
+    def prep_df(self, ctx: Settings, df: DataFrame) -> DataFrame:
+        """
+        Constructs figures based on parsed pandas dataframe.
+
+        Args:
+            ctx (Settings): settings passed down from gui
+            df (pd.DataFrame): input dataframe
+            ytitle (str | None, optional): title for the y-axis. Defaults to None.
+
+        Returns:
+            Figure: Plotly figure
+        """
+        # from backend.excel import drop_reruns_from_df
+        # converts starred genera to normal and splits off list of starred
+        genera = []
+        if df.empty:
+            return None
+        for item in df['genus'].to_list():
+            try:
+                if item[-1] == "*":
+                    genera.append(item[-1])
+                else:
+                    genera.append("")
+            except IndexError:
+                genera.append("")
+        df['genus'] = df['genus'].replace({'\*': ''}, regex=True).replace({"NaN": "Unknown"})
+        df['genera'] = genera
+        # NOTE: remove original runs, using reruns if applicable
+        df = self.drop_reruns_from_df(ctx=ctx, df=df)
+        # NOTE: sort by and exclude from
+        sorts = ['submitted_date', "target", "genus"]
+        exclude = ['name', 'genera']
+        modes = [item for item in df.columns if item not in sorts and item not in exclude]  # and "_hashes" not in item]
+        # NOTE: Set descending for any columns that have "{mode}" in the header.
+        ascending = [False if item == "target" else True for item in sorts]
+        df = df.sort_values(by=sorts, ascending=ascending)
+        # logger.debug(df[df.isna().any(axis=1)])
+        # NOTE: actual chart construction is done by
+        return df, modes
+
+    def drop_reruns_from_df(self, ctx: Settings, df: DataFrame) -> DataFrame:
+        """
+        Removes semi-duplicates from dataframe after finding sequencing repeats.
+
+        Args:
+            settings (dict): settings passed from gui
+            df (DataFrame): initial dataframe
+
+        Returns:
+            DataFrame: dataframe with originals removed in favour of repeats.
+        """
+        if 'rerun_regex' in ctx:
+            sample_names = get_unique_values_in_df_column(df, column_name="name")
+            rerun_regex = re.compile(fr"{ctx.rerun_regex}")
+            for sample in sample_names:
+                if rerun_regex.search(sample):
+                    first_run = re.sub(rerun_regex, "", sample)
+                    df = df.drop(df[df.name == first_run].index)
+        return df
+
 
 class ControlsDatePicker(QWidget):
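The drop_reruns_from_df() method added above only prunes rows when the Settings context defines a rerun_regex, keeping each rerun in place of its original run. Below is a standalone illustration of that pruning step, using a made-up sample-name scheme and a hypothetical r"-R\d$" pattern standing in for ctx.rerun_regex:

# Illustration only; the real column values and rerun_regex come from the app's Settings.
import re
from pandas import DataFrame

df = DataFrame({"name": ["EN-0001", "EN-0001-R1", "EN-0002"], "target": [10, 12, 7]})
rerun_regex = re.compile(r"-R\d$")           # hypothetical rerun suffix pattern
for sample in df["name"].unique():
    if rerun_regex.search(sample):
        first_run = re.sub(rerun_regex, "", sample)
        # drop the original run so only its rerun survives
        df = df.drop(df[df["name"] == first_run].index)
print(df["name"].tolist())                   # ['EN-0001-R1', 'EN-0002']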

@@ -97,10 +97,10 @@ class RoleComboBox(QWidget):
         self.layout = QGridLayout()
         self.role = role
         self.check = QCheckBox()
-        if role.name in used:
+        # if role.name in used:
         self.check.setChecked(False)
-        else:
-            self.check.setChecked(True)
+        # else:
+        #     self.check.setChecked(True)
         self.check.stateChanged.connect(self.toggle_checked)
         self.box = QComboBox()
         self.box.setMaximumWidth(200)
@@ -157,6 +157,7 @@ class RoleComboBox(QWidget):
         widget.setMinimumWidth(200)
         widget.setMaximumWidth(200)
         self.layout.addWidget(widget, 0, 4)
+        widget.setEnabled(self.check.isChecked())
 
     def parse_form(self) -> PydEquipment | None:
         """
@@ -190,7 +191,4 @@ class RoleComboBox(QWidget):
             case QCheckBox():
                 continue
             case _:
-                if self.check.isChecked():
-                    widget.setEnabled(True)
-                else:
-                    widget.setEnabled(False)
+                widget.setEnabled(self.check.isChecked())

@@ -1,6 +1,6 @@
-'''
+"""
 Webview to show submission and sample details.
-'''
+"""
 from PyQt6.QtWidgets import (QDialog, QPushButton, QVBoxLayout,
                              QDialogButtonBox, QTextEdit)
 from PyQt6.QtWebEngineWidgets import QWebEngineView
@@ -84,7 +84,7 @@ class SubmissionDetails(QDialog):
         if isinstance(submission, str):
             submission = BasicSubmission.query(rsl_plate_num=submission)
         self.base_dict = submission.to_dict(full_data=True)
-        # logger.debug(f"Submission details data:\n{pformat({k:v for k,v in self.base_dict.items() if k != 'samples'})}")
+        logger.debug(f"Submission details data:\n{pformat({k:v for k,v in self.base_dict.items() if k == 'reagents'})}")
         # NOTE: don't want id
         self.base_dict = submission.finalize_details(self.base_dict)
         # logger.debug(f"Creating barcode.")

@@ -134,10 +134,10 @@ class SubmissionsSheet(QTableView):
         """
         Pull extraction logs into the db
         """
-        self.report = Report()
-        self.link_extractions_function()
-        self.report.add_result(self.report)
-        return self.report
+        report = Report()
+        result = self.link_extractions_function()
+        report.add_result(result)
+        return report
 
     def link_extractions_function(self):
         """
@@ -149,6 +149,7 @@ class SubmissionsSheet(QTableView):
         Returns:
             Tuple[QMainWindow, dict]: Collection of new main app window and result dict
         """
+        report = Report()
         fname = select_open_file(self, file_extension="csv")
         with open(fname.__str__(), 'r') as f:
             # split csv on commas
@@ -178,17 +179,18 @@ class SubmissionsSheet(QTableView):
                     continue
                 sub.set_attribute('extraction_info', new_run)
                 sub.save()
-        self.report.add_result(Result(msg=f"We added {count} logs to the database.", status='Information'))
+        report.add_result(Result(msg=f"We added {count} logs to the database.", status='Information'))
+        return report
 
     @report_result
     def link_pcr(self):
         """
         Pull pcr logs into the db
         """
-        self.link_pcr_function()
-        self.app.report.add_result(self.report)
-        self.report = Report()
-        return self.report
+        report = Report()
+        result = self.link_pcr_function()
+        report.add_result(result)
+        return report
 
     def link_pcr_function(self):
         """
@@ -200,6 +202,7 @@ class SubmissionsSheet(QTableView):
         Returns:
             Tuple[QMainWindow, dict]: Collection of new main app window and result dict
         """
+        report = Report()
         fname = select_open_file(self, file_extension="csv")
         with open(fname.__str__(), 'r') as f:
             # NOTE: split csv rows on comma
@@ -226,16 +229,18 @@ class SubmissionsSheet(QTableView):
                 sub.set_attribute('pcr_info', new_run)
                 # NOTE: check if pcr_info already exists
                 sub.save()
-        self.report.add_result(Result(msg=f"We added {count} logs to the database.", status='Information'))
+        report.add_result(Result(msg=f"We added {count} logs to the database.", status='Information'))
+        return report
 
     @report_result
     def generate_report(self, *args):
         """
         Make a report
         """
-        self.report = Report()
-        self.generate_report_function()
-        return self.report
+        report = Report()
+        result = self.generate_report_function()
+        report.add_result(result)
+        return report
 
     def generate_report_function(self):
         """
@@ -255,4 +260,4 @@ class SubmissionsSheet(QTableView):
         fname = select_save_file(obj=self, default_name=f"Submissions_Report_{info['start_date']}-{info['end_date']}.docx", extension="docx")
         rp = ReportMaker(start_date=info['start_date'], end_date=info['end_date'])
         rp.write_report(filename=fname, obj=self)
-        self.report.add_result(report)
+        return report

@@ -121,8 +121,7 @@ class SubmissionFormContainer(QWidget):
         # logger.debug(f"Attempting to parse file: {fname}")
         if not fname.exists():
             report.add_result(Result(msg=f"File {fname.__str__()} not found.", status="critical"))
-            self.report.add_result(report)
-            return
+            return report
         # NOTE: create sheetparser using excel sheet and context from gui
         try:
             self.prsr = SheetParser(filepath=fname)
@@ -136,7 +135,7 @@ class SubmissionFormContainer(QWidget):
         # logger.debug(f"Pydantic result: \n\n{pformat(self.pyd)}\n\n")
         self.form = self.pyd.to_form(parent=self)
         self.layout().addWidget(self.form)
-        self.report.add_result(report)
+        return report
         # logger.debug(f"Outgoing report: {self.report.results}")
         # logger.debug(f"All attributes of submission container:\n{pformat(self.__dict__)}")
 

@@ -844,7 +844,7 @@ def html_to_pdf(html: str, output_file: Path | str):
 
 def remove_key_from_list_of_dicts(input: list, key: str) -> list:
     """
-    Removes a key from all dictionaries in a list
+    Removes a key from all dictionaries in a list of dictionaries
 
     Args:
         input (list): Input list of dicts
@@ -939,5 +939,4 @@ def report_result(func):
                 logger.error(result.msg)
         logger.debug(f"Returning: {output}")
         return output
-
     return wrapper
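The hunk above shows only the tail of the report_result decorator, but together with the widget changes earlier in this commit it establishes the new pattern: slots build a local Report, fold in the Results returned by their *_function helpers, and return the Report for the decorator to handle. What follows is a minimal sketch of that pattern under stated assumptions (Report keeps its Results in a .results list and Result carries msg and status); the project's real wrapper presumably also surfaces results in the GUI.

# Minimal sketch, not the project's actual report_result implementation.
import logging
from functools import wraps

from tools import Report  # project helper, assumed importable as in the widget imports above

logger = logging.getLogger(f"submissions.{__name__}")


def report_result(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        output = func(*args, **kwargs)
        # the wrapped slot now returns its Report instead of mutating self.report
        if isinstance(output, Report):
            for result in output.results:  # assumption: Report exposes .results
                if result.status.lower() == "critical":
                    logger.error(result.msg)
        logger.debug(f"Returning: {output}")
        return output
    return wrapper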