Adaptations to allow for stand-alone scripts.

This commit is contained in:
Landon Wark
2024-03-18 09:47:20 -05:00
parent 2fd8a0b9b6
commit e35081392e
7 changed files with 34 additions and 20 deletions

View File

@@ -339,7 +339,6 @@ class Reagent(BaseClass):
submissions = association_proxy("reagent_submission_associations", "submission") #: Association proxy to SubmissionSampleAssociation.samples submissions = association_proxy("reagent_submission_associations", "submission") #: Association proxy to SubmissionSampleAssociation.samples
def __repr__(self): def __repr__(self):
if self.name != None: if self.name != None:
return f"<Reagent({self.name}-{self.lot})>" return f"<Reagent({self.name}-{self.lot})>"

View File

@@ -42,6 +42,7 @@ class Organization(BaseClass):
@classmethod @classmethod
@setup_lookup @setup_lookup
def query(cls, def query(cls,
id:int|None=None,
name:str|None=None, name:str|None=None,
limit:int=0, limit:int=0,
) -> Organization|List[Organization]: ) -> Organization|List[Organization]:
@@ -56,6 +57,12 @@ class Organization(BaseClass):
Organization|List[Organization]: Organization|List[Organization]:
""" """
query: Query = cls.__database_session__.query(cls) query: Query = cls.__database_session__.query(cls)
match id:
case int():
query = query.filter(cls.id==id)
limit = 1
case _:
pass
match name: match name:
case str(): case str():
# logger.debug(f"Looking up organization with name: {name}") # logger.debug(f"Looking up organization with name: {name}")

View File

@@ -1242,9 +1242,9 @@ class Wastewater(BasicSubmission):
outstr = super().enforce_name(instr=instr, data=data) outstr = super().enforce_name(instr=instr, data=data)
try: try:
outstr = re.sub(r"PCR(-|_)", "", outstr) outstr = re.sub(r"PCR(-|_)", "", outstr)
except AttributeError as e: except (AttributeError, TypeError) as e:
logger.error(f"Problem using regex: {e}") logger.error(f"Problem using regex: {e}")
outstr = RSLNamer.construct_new_plate_name(instr=outstr) outstr = RSLNamer.construct_new_plate_name(data=data)
outstr = outstr.replace("RSLWW", "RSL-WW") outstr = outstr.replace("RSLWW", "RSL-WW")
outstr = re.sub(r"WW(\d{4})", r"WW-\1", outstr, flags=re.IGNORECASE) outstr = re.sub(r"WW(\d{4})", r"WW-\1", outstr, flags=re.IGNORECASE)
outstr = re.sub(r"(\d{4})-(\d{2})-(\d{2})", r"\1\2\3", outstr) outstr = re.sub(r"(\d{4})-(\d{2})-(\d{2})", r"\1\2\3", outstr)

View File

@@ -274,7 +274,7 @@ class SampleParser(object):
object to pull data for samples in excel sheet and construct individual sample objects object to pull data for samples in excel sheet and construct individual sample objects
""" """
def __init__(self, xl:pd.ExcelFile, submission_type:str) -> None: def __init__(self, xl:pd.ExcelFile, submission_type:str, sample_map:dict|None=None) -> None:
""" """
convert sample sub-dataframe to dictionary of records convert sample sub-dataframe to dictionary of records
@@ -286,7 +286,7 @@ class SampleParser(object):
self.samples = [] self.samples = []
self.xl = xl self.xl = xl
self.submission_type = submission_type self.submission_type = submission_type
sample_info_map = self.fetch_sample_info_map(submission_type=submission_type) sample_info_map = self.fetch_sample_info_map(submission_type=submission_type, sample_map=sample_map)
logger.debug(f"sample_info_map: {sample_info_map}") logger.debug(f"sample_info_map: {sample_info_map}")
self.plate_map = self.construct_plate_map(plate_map_location=sample_info_map['plate_map']) self.plate_map = self.construct_plate_map(plate_map_location=sample_info_map['plate_map'])
logger.debug(f"plate_map: {self.plate_map}") logger.debug(f"plate_map: {self.plate_map}")
@@ -298,7 +298,7 @@ class SampleParser(object):
if isinstance(self.lookup_table, pd.DataFrame): if isinstance(self.lookup_table, pd.DataFrame):
self.parse_lookup_table() self.parse_lookup_table()
def fetch_sample_info_map(self, submission_type:str) -> dict: def fetch_sample_info_map(self, submission_type:str, sample_map:dict|None=None) -> dict:
""" """
Gets info locations in excel book for submission type. Gets info locations in excel book for submission type.
@@ -311,7 +311,10 @@ class SampleParser(object):
logger.debug(f"Looking up submission type: {submission_type}") logger.debug(f"Looking up submission type: {submission_type}")
submission_type = SubmissionType.query(name=submission_type) submission_type = SubmissionType.query(name=submission_type)
logger.debug(f"info_map: {pformat(submission_type.info_map)}") logger.debug(f"info_map: {pformat(submission_type.info_map)}")
if sample_map is None:
sample_info_map = submission_type.info_map['samples'] sample_info_map = submission_type.info_map['samples']
else:
sample_info_map = sample_map
self.custom_sub_parser = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=submission_type.name).parse_samples self.custom_sub_parser = BasicSubmission.find_polymorphic_subclass(polymorphic_identity=submission_type.name).parse_samples
self.custom_sample_parser = BasicSample.find_polymorphic_subclass(polymorphic_identity=f"{submission_type.name} Sample").parse_sample self.custom_sample_parser = BasicSample.find_polymorphic_subclass(polymorphic_identity=f"{submission_type.name} Sample").parse_sample
return sample_info_map return sample_info_map

View File

@@ -518,7 +518,7 @@ class PydSubmission(BaseModel, extra='allow'):
case "samples": case "samples":
for sample in self.samples: for sample in self.samples:
sample, associations, _ = sample.toSQL(submission=instance) sample, associations, _ = sample.toSQL(submission=instance)
logger.debug(f"Sample SQL object to be added to submission: {sample.__dict__}") # logger.debug(f"Sample SQL object to be added to submission: {sample.__dict__}")
for assoc in associations: for assoc in associations:
instance.submission_sample_associations.append(assoc) instance.submission_sample_associations.append(assoc)
case "equipment": case "equipment":
@@ -534,7 +534,6 @@ class PydSubmission(BaseModel, extra='allow'):
association.save() association.save()
logger.debug(f"Equipment association SQL object to be added to submission: {association.__dict__}") logger.debug(f"Equipment association SQL object to be added to submission: {association.__dict__}")
instance.submission_equipment_associations.append(association) instance.submission_equipment_associations.append(association)
case _: case _:
try: try:
instance.set_attribute(key=key, value=value) instance.set_attribute(key=key, value=value)
@@ -548,7 +547,10 @@ class PydSubmission(BaseModel, extra='allow'):
instance.calculate_base_cost() instance.calculate_base_cost()
except (TypeError, AttributeError) as e: except (TypeError, AttributeError) as e:
logger.debug(f"Looks like that kit doesn't have cost breakdown yet due to: {e}, using full plate cost.") logger.debug(f"Looks like that kit doesn't have cost breakdown yet due to: {e}, using full plate cost.")
try:
instance.run_cost = instance.extraction_kit.cost_per_run instance.run_cost = instance.extraction_kit.cost_per_run
except AttributeError:
instance.run_cost = 0
logger.debug(f"Calculated base run cost of: {instance.run_cost}") logger.debug(f"Calculated base run cost of: {instance.run_cost}")
# Apply any discounts that are applicable for client and kit. # Apply any discounts that are applicable for client and kit.
try: try:

View File

@@ -469,11 +469,11 @@ class SubmissionFormWidget(QWidget):
Args: Args:
fname (Path | None, optional): Input filename. Defaults to None. fname (Path | None, optional): Input filename. Defaults to None.
""" """
pyd = self.parse_form() self.parse_form()
if isinstance(fname, bool) or fname == None: if isinstance(fname, bool) or fname == None:
fname = select_save_file(obj=self, default_name=pyd.construct_filename(), extension="csv") fname = select_save_file(obj=self, default_name=self.pyd.construct_filename(), extension="csv")
try: try:
pyd.csv.to_csv(fname.__str__(), index=False) self.pyd.csv.to_csv(fname.__str__(), index=False)
except PermissionError: except PermissionError:
logger.debug(f"Could not get permissions to {fname}. Possibly the request was cancelled.") logger.debug(f"Could not get permissions to {fname}. Possibly the request was cancelled.")

View File

@@ -117,12 +117,15 @@ def check_regex_match(pattern:str, check:str) -> bool:
return False return False
def get_first_blank_df_row(df:pd.DataFrame) -> int: def get_first_blank_df_row(df:pd.DataFrame) -> int:
#First, find NaN entries in first column """
# blank_row_bool = df.iloc[:,2].isna() For some reason I need a whole function for this.
# logger.debug(f"Blank row bool: {blank_row_bool}")
# #Next, get index of first NaN entry Args:
# blank_row_index = [i for i, x in enumerate(blank_row_bool) if x][0] df (pd.DataFrame): Input dataframe.
# return blank_row_index
Returns:
int: Index of the row after the last used row.
"""
return df.shape[0] + 1 return df.shape[0] + 1
# Settings # Settings