_id: string (lengths 2-7)
title: string (lengths 1-88)
partition: string (3 classes)
text: string (lengths 75-19.8k)
language: string (1 class)
meta_information: dict
q21300
Import
train
def Import(context, request, instrumentname='sysmex_xs_500i'):
    """ Sysmex XS - 500i analysis results
    """
    # I don't really know how this file works; for this reason I added an
    # 'Analysis Service selector'.
    # If no Analysis Service is selected, each 'data' column will be
    # interpreted as a different Analysis Service. If an Analysis Service was
    # selected, all the data columns will be interpreted as different data
    # from a single Analysis Service.
    formitems = getForm(instrumentname, request)
    infile = formitems['infile']
    fileformat = formitems['fileformat']
    artoapply = formitems['artoapply']
    override = formitems['override']
    instrument = formitems['instrument']
    errors = []
    logs = []
    warns = []

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'csv':
        # Get the Analysis Service selected, if there is one.
        analysis = request.form.get('analysis_service', None)
        if analysis:
            # Get the default result key.
            defaultresult = request.form.get('default_result', None)
            # Raise an error if the default result key is missing.
            parser = SysmexXS500iCSVParser(infile, analysis, defaultresult) \
                if defaultresult \
                else errors.append(t(_("You should introduce a default result key.",
                                       mapping={"fileformat": fileformat})))
        else:
            parser = SysmexXS500iCSVParser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))

    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']

        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]

        importer = SysmexXS500iImporter(parser=parser,
                                        context=context,
                                        allowed_ar_states=status,
                                        allowed_analysis_states=None,
                                        override=over,
                                        instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)
python
{ "resource": "" }
q21301
notify_rejection
train
def notify_rejection(analysisrequest):
    """Notifies via email that a given Analysis Request has been rejected. The
    notification is sent to the Client contacts assigned to the Analysis
    Request.

    :param analysisrequest: Analysis Request to which the notification refers
    :returns: true if success
    """
    # We do these imports here to avoid circular dependencies until we deal
    # better with this notify_rejection thing.
    from bika.lims.browser.analysisrequest.reject import \
        AnalysisRequestRejectPdfView, AnalysisRequestRejectEmailView

    arid = analysisrequest.getId()

    # This is the template to render for the pdf that will be both attached
    # to the email and attached to the Analysis Request for further access
    tpl = AnalysisRequestRejectPdfView(analysisrequest,
                                       analysisrequest.REQUEST)
    html = tpl.template()
    html = safe_unicode(html).encode('utf-8')
    filename = '%s-rejected' % arid
    pdf_fn = tempfile.mktemp(suffix=".pdf")
    pdf = createPdf(htmlreport=html, outfile=pdf_fn)
    if pdf:
        # Attach the pdf to the Analysis Request
        attid = analysisrequest.aq_parent.generateUniqueId('Attachment')
        att = _createObjectByType(
            "Attachment", analysisrequest.aq_parent, attid)
        att.setAttachmentFile(open(pdf_fn))
        # Awkward workaround to rename the file
        attf = att.getAttachmentFile()
        attf.filename = '%s.pdf' % filename
        att.setAttachmentFile(attf)
        att.unmarkCreationFlag()
        renameAfterCreation(att)
        analysisrequest.addAttachment(att)
        os.remove(pdf_fn)

    # This is the message for the email's body
    tpl = AnalysisRequestRejectEmailView(analysisrequest,
                                         analysisrequest.REQUEST)
    html = tpl.template()
    html = safe_unicode(html).encode('utf-8')

    # compose and send email.
    mailto = []
    lab = analysisrequest.bika_setup.laboratory
    mailfrom = formataddr((encode_header(lab.getName()),
                           lab.getEmailAddress()))
    mailsubject = _('%s has been rejected') % arid
    contacts = [analysisrequest.getContact()] + analysisrequest.getCCContact()
    for contact in contacts:
        name = to_utf8(contact.getFullname())
        email = to_utf8(contact.getEmailAddress())
        if email:
            mailto.append(formataddr((encode_header(name), email)))
    if not mailto:
        return False

    mime_msg = MIMEMultipart('related')
    mime_msg['Subject'] = mailsubject
    mime_msg['From'] = mailfrom
    mime_msg['To'] = ','.join(mailto)
    mime_msg.preamble = 'This is a multi-part MIME message.'
    msg_txt = MIMEText(html, _subtype='html')
    mime_msg.attach(msg_txt)
    if pdf:
        attachPdf(mime_msg, pdf, filename)

    try:
        host = getToolByName(analysisrequest, 'MailHost')
        host.send(mime_msg.as_string(), immediate=True)
    except:
        logger.warning(
            "Email with subject %s was not sent (SMTP connection error)"
            % mailsubject)

    return True
python
{ "resource": "" }
q21302
fields_to_dict
train
def fields_to_dict(obj, skip_fields=None):
    """Generates a dictionary with the field values of the object passed in,
    where keys are the field names. Skips computed fields
    """
    data = {}
    obj = api.get_object(obj)
    for field_name, field in api.get_fields(obj).items():
        if skip_fields and field_name in skip_fields:
            continue
        if field.type == "computed":
            continue
        data[field_name] = field.get(obj)
    return data
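# A hedged usage sketch (the `sample` object and the field names shown are
# illustrative assumptions, not part of this API):
#
#   data = fields_to_dict(sample, skip_fields=["id"])
#   # -> {"Priority": ..., "Contact": ...}, with computed fields and the
#   #    skipped "id" field left out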
python
{ "resource": "" }
q21303
update_permissions_rejected_analysis_requests
train
def update_permissions_rejected_analysis_requests():
    """
    Maps and updates the permissions for rejected analysis requests so
    lab clerks, clients, owners and RegulatoryInspector can see rejected
    analysis requests on lists.

    :return: None
    """
    workflow_tool = api.get_tool("portal_workflow")
    workflow = workflow_tool.getWorkflowById('bika_ar_workflow')
    catalog = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
    brains = catalog(review_state='rejected')
    counter = 0
    total = len(brains)
    logger.info(
        "Changing permissions for rejected analysis requests. " +
        "Number of rejected analysis requests: {0}".format(total))
    for brain in brains:
        if 'LabClerk' not in brain.allowedRolesAndUsers:
            if counter % 100 == 0:
                logger.info(
                    "Changing permissions for rejected analysis requests: " +
                    "{0}/{1}".format(counter, total))
            obj = api.get_object(brain)
            workflow.updateRoleMappingsFor(obj)
            obj.reindexObject()
        counter += 1
    logger.info(
        "Changed permissions for rejected analysis requests: " +
        "{0}/{1}".format(counter, total))
python
{ "resource": "" }
q21304
migrate_to_blob
train
def migrate_to_blob(context, portal_type, query={}, remove_old_value=True):
    """Migrates FileField fields to blob ones for a given portal_type. The
    queries are done against 'portal_catalog', 'uid_catalog' and
    'reference_catalog'

    :param context: portal root object as context
    :param query: an expression to filter the catalog by other filters than
        the portal_type.
    :param portal_type: The portal type name the migration is migrating *from*
    """
    migrator = makeMigrator(
        context, portal_type, remove_old_value=remove_old_value)
    walker = BikaCustomQueryWalker(context, migrator, query=query)
    savepoint(optimistic=True)
    walker.go()
    return walker.getOutput()
python
{ "resource": "" }
q21305
makeMigrator
train
def makeMigrator(context, portal_type, remove_old_value=True):
    """ generate a migrator for the given at-based portal type """
    meta_type = portal_type

    class BlobMigrator(BaseInlineMigrator):
        """in-place migrator for archetypes based content that copies
        file/image data from old non-blob fields to new fields with the same
        name provided by archetypes.schemaextender.

        see `plone3 to 4 migration guide`__

        .. __: https://plone.org/documentation/manual/upgrade-guide/version
               /upgrading-plone-3-x-to-4.0/updating-add-on-products-for-plone-4.0
               /use-plone.app.blob-based-blob-storage
        """
        src_portal_type = portal_type
        src_meta_type = meta_type
        dst_portal_type = portal_type
        dst_meta_type = meta_type
        fields = []

        def getFields(self, obj):
            if not self.fields:
                # get the blob fields to migrate from the first object
                for field in ISchema(obj).fields():
                    if IBlobField.providedBy(field):
                        self.fields.append(field.getName())
            return self.fields

        @property
        def fields_map(self):
            fields = self.getFields(None)
            return dict([(name, None) for name in fields])

        def migrate_data(self):
            fields = self.getFields(self.obj)
            for name in fields:
                # access the old field without using schemaextender
                oldfield = self.obj.schema[name]
                is_imagefield = False
                if hasattr(oldfield, 'removeScales'):
                    # clean up old image scales
                    is_imagefield = True
                    oldfield.removeScales(self.obj)
                value = oldfield.get(self.obj)

                if not value:
                    # no image/file data: don't copy it over to the blob
                    # field. This way it's safe to run the migration multiple
                    # times without overwriting existing data
                    continue

                if isinstance(aq_base(value), BlobWrapper):
                    # already a blob field, no need to migrate it
                    continue

                # access the new field via schemaextender
                field = self.obj.getField(name)
                field.getMutator(self.obj)(value)

                if remove_old_value:
                    # Remove data from the old field so we don't end up with
                    # the data stored twice - in the ZODB and in blobstorage
                    if is_imagefield:
                        oldfield.set(self.obj, 'DELETE_IMAGE')
                    else:
                        oldfield.set(self.obj, 'DELETE_FILE')

        def last_migrate_reindex(self):
            # The original method checks the modification date in order to
            # keep the old one, but we don't care about it.
            self.obj.reindexObject()

    return BlobMigrator
python
{ "resource": "" }
q21306
BikaCustomQueryWalker.walk
train
def walk(self):
    """Walks around and returns all objects which need migration

    It does exactly the same as the original method, but adds some progress
    loggers.

    :return: objects (with acquisition wrapper) that need migration
    :rtype: generator
    """
    catalog = self.catalog
    query = self.additionalQuery.copy()
    query['portal_type'] = self.src_portal_type
    query['meta_type'] = self.src_meta_type
    if HAS_LINGUA_PLONE and 'Language' in catalog.indexes():
        query['Language'] = 'all'
    brains = catalog(query)
    limit = getattr(self, 'limit', False)
    if limit:
        brains = brains[:limit]
    obj_num_total = len(brains)
    logger.info('{} {} objects will be migrated walking through {}'
                .format(obj_num_total, self.src_portal_type, catalog.id))
    counter = 0
    for brain in brains:
        if counter % 100 == 0:
            logger.info('Progress: {} objects have been migrated out of {}'
                        .format(counter, obj_num_total))
        try:
            obj = brain.getObject()
        except AttributeError:
            LOG.error("Couldn't access %s" % brain.getPath())
            continue
        if self.callBefore is not None and callable(self.callBefore):
            if not self.callBefore(obj, **self.kwargs):
                continue
        try:
            state = obj._p_changed
        except Exception:
            state = 0
        if obj is not None:
            yield obj
            # safe my butt
            if state is None:
                obj._p_deactivate()
        counter += 1
    if obj_num_total == counter:
        logger.info('Progress: {} objects have been migrated out of {}'
                    .format(counter, obj_num_total))
python
{ "resource": "" }
q21307
UpgradeUtils.refreshCatalogs
train
def refreshCatalogs(self):
    """
    Reindexes the modified catalogs, but while cleanAndRebuildCatalogs
    recatalogs all objects in the database, this method only reindexes over
    the already-cataloged objects.

    If a metacolumn is added it refreshes the catalog; if only a new index
    is added, it reindexes only those new indexes.
    """
    to_refresh = self.refreshcatalog[:]
    to_reindex = self.reindexcatalog.keys()
    to_reindex = to_reindex[:]
    done = []
    # Start reindexing the catalogs with new columns
    for catalog_to_refresh in to_refresh:
        logger.info(
            'Catalog {0} refreshing started'.format(catalog_to_refresh))
        catalog = getToolByName(self.portal, catalog_to_refresh)
        handler = ZLogHandler(self.pgthreshold)
        catalog.refreshCatalog(pghandler=handler)
        logger.info('Catalog {0} refreshed'.format(catalog_to_refresh))
        transaction.commit()
        done.append(catalog_to_refresh)
    # Now the catalogs which only need reindexing
    for catalog_to_reindex in to_reindex:
        if catalog_to_reindex in done:
            continue
        logger.info(
            'Catalog {0} reindexing started'.format(catalog_to_reindex))
        catalog = getToolByName(self.portal, catalog_to_reindex)
        indexes = self.reindexcatalog[catalog_to_reindex]
        handler = ZLogHandler(self.pgthreshold)
        catalog.reindexIndex(indexes, None, pghandler=handler)
        logger.info('Catalog {0} reindexed'.format(catalog_to_reindex))
        transaction.commit()
        done.append(catalog_to_reindex)
python
{ "resource": "" }
q21308
UpgradeUtils.recursiveUpdateRoleMappings
train
def recursiveUpdateRoleMappings(self, ob, wfs=None, commit_window=1000):
    """Code taken from Products.CMFPlone.WorkflowTool

    This version adds some commits and logging
    """
    wf_tool = api.get_tool("portal_workflow")
    if wfs is None:
        wfs = {}
        for id in wf_tool.objectIds():
            wf = wf_tool.getWorkflowById(id)
            if hasattr(aq_base(wf), 'updateRoleMappingsFor'):
                wfs[id] = wf

    # Returns a count of updated objects.
    count = 0
    wf_ids = wf_tool.getChainFor(ob)
    if wf_ids:
        changed = 0
        for wf_id in wf_ids:
            wf = wfs.get(wf_id, None)
            if wf is not None:
                did = wf.updateRoleMappingsFor(ob)
                if did:
                    changed = 1
        if changed:
            count = count + 1
            if hasattr(aq_base(ob), 'reindexObject'):
                # Reindex security-related indexes
                try:
                    ob.reindexObject(idxs=['allowedRolesAndUsers'])
                except TypeError:
                    # Catch attempts to reindex portal_catalog.
                    pass

    if hasattr(aq_base(ob), 'objectItems'):
        obs = ob.objectItems()
        if obs:
            committed = 0
            logged = 0
            for k, v in obs:
                if count - logged >= 100:
                    logger.info(
                        "Updating role mappings for {}: {}".format(
                            repr(ob), count))
                    logged += count
                changed = getattr(v, '_p_changed', 0)
                processed = self.recursiveUpdateRoleMappings(v, wfs,
                                                             commit_window)
                count += processed
                if changed is None:
                    # Re-ghostify.
                    v._p_deactivate()
                if count - committed >= commit_window:
                    commit_transaction()
                    committed += count
    return count
python
{ "resource": "" }
q21309
Worksheet.addAnalyses
train
def addAnalyses(self, analyses):
    """Adds a collection of analyses to the Worksheet at once
    """
    actions_pool = ActionHandlerPool.get_instance()
    actions_pool.queue_pool()
    for analysis in analyses:
        self.addAnalysis(api.get_object(analysis))
    actions_pool.resume()
python
{ "resource": "" }
q21310
Worksheet.removeAnalysis
train
def removeAnalysis(self, analysis):
    """Unassigns the analysis passed in from the worksheet.
    Delegates to the 'unassign' transition for the analysis passed in
    """
    # We need to bypass the guard's check for current context!
    api.get_request().set("ws_uid", api.get_uid(self))
    if analysis.getWorksheet() == self:
        doActionFor(analysis, "unassign")
python
{ "resource": "" }
q21311
Worksheet.addToLayout
train
def addToLayout(self, analysis, position=None):
    """Adds the analysis passed in to the worksheet's layout
    """
    # TODO Redux
    layout = self.getLayout()
    container_uid = self.get_container_for(analysis)
    if IRequestAnalysis.providedBy(analysis) and \
            not IDuplicateAnalysis.providedBy(analysis):
        container_uids = map(lambda slot: slot['container_uid'], layout)
        if container_uid in container_uids:
            position = [int(slot['position']) for slot in layout
                        if slot['container_uid'] == container_uid][0]
        elif not position:
            used_positions = [0, ] + [int(slot['position'])
                                      for slot in layout]
            position = [pos for pos in range(1, max(used_positions) + 2)
                        if pos not in used_positions][0]
    an_type = self.get_analysis_type(analysis)
    self.setLayout(layout + [{'position': position,
                              'type': an_type,
                              'container_uid': container_uid,
                              'analysis_uid': api.get_uid(analysis)}, ])
python
{ "resource": "" }
q21312
Worksheet.purgeLayout
train
def purgeLayout(self):
    """Purges analyses that are no longer assigned from the layout
    """
    uids = map(api.get_uid, self.getAnalyses())
    layout = filter(lambda slot: slot.get("analysis_uid", None) in uids,
                    self.getLayout())
    self.setLayout(layout)
python
{ "resource": "" }
q21313
Worksheet._getInstrumentsVoc
train
def _getInstrumentsVoc(self):
    """This function returns the registered instruments in the system as a
    vocabulary. The instruments are filtered by the selected method.
    """
    cfilter = {'portal_type': 'Instrument', 'is_active': True}
    if self.getMethod():
        cfilter['getMethodUIDs'] = {"query": self.getMethod().UID(),
                                    "operator": "or"}
    bsc = getToolByName(self, 'bika_setup_catalog')
    items = [('', 'No instrument')] + [
        (o.UID, o.Title) for o in bsc(cfilter)]
    o = self.getInstrument()
    if o and o.UID() not in [i[0] for i in items]:
        items.append((o.UID(), o.Title()))
    items.sort(lambda x, y: cmp(x[1], y[1]))
    return DisplayList(list(items))
python
{ "resource": "" }
q21314
Worksheet.nextRefAnalysesGroupID
train
def nextRefAnalysesGroupID(self, reference):
    """Returns the next ReferenceAnalysesGroupID for the given reference
    sample. Gets the last reference analysis registered in the system
    for the specified reference sample and increments its suffix by one.
    """
    prefix = reference.id + "-"
    if not IReferenceSample.providedBy(reference):
        # Not a ReferenceSample, so this is a duplicate
        prefix = reference.id + "-D"
    bac = getToolByName(reference, 'bika_analysis_catalog')
    ids = bac.Indexes['getReferenceAnalysesGroupID'].uniqueValues()
    rr = re.compile("^" + prefix + "[\d+]+$")
    ids = [int(i.split(prefix)[1]) for i in ids if i and rr.match(i)]
    ids.sort()
    _id = ids[-1] if ids else 0
    suffix = str(_id + 1).zfill(int(3))
    if not IReferenceSample.providedBy(reference):
        # Not a ReferenceSample, so this is a duplicate
        suffix = str(_id + 1).zfill(2)
    return '%s%s' % (prefix, suffix)
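# A hedged worked example (ids are made up): for a reference sample with id
# "REF-1" whose last registered group id is "REF-1-007", the catalog lookup
# yields 7 and the method returns "REF-1-008" (suffix zero-padded to 3
# digits); for a duplicate, the prefix becomes "REF-1-D" and the suffix is
# padded to 2 digits, e.g. "REF-1-D08".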
python
{ "resource": "" }
q21315
Worksheet.get_suitable_slot_for_duplicate
train
def get_suitable_slot_for_duplicate(self, src_slot):
    """Returns the suitable position for a duplicate analysis, taking into
    account if there is a WorksheetTemplate assigned to this worksheet.

    By default, returns a new slot at the end of the worksheet unless there
    is a slot defined for a duplicate of the src_slot in the worksheet
    template layout not yet used.

    :param src_slot: the source slot to create the duplicate from
    :return: suitable slot position for a duplicate of src_slot
    """
    slot_from = to_int(src_slot, 0)
    if slot_from < 1:
        return -1

    # Are the analyses from src_slot suitable for duplicates creation?
    container = self.get_container_at(slot_from)
    if not container or not IAnalysisRequest.providedBy(container):
        # We cannot create duplicates from analyses other than routine
        # ones, those that belong to an Analysis Request.
        return -1

    occupied = self.get_slot_positions(type='all')
    wst = self.getWorksheetTemplate()
    if not wst:
        # No worksheet template assigned, add a new slot at the end of
        # the worksheet with the duplicate there
        slot_to = max(occupied) + 1
        return slot_to

    # If there is a match with the layout defined in the Worksheet
    # Template, use that slot instead of adding a new one at the end of
    # the worksheet
    layout = wst.getLayout()
    for pos in layout:
        if pos['type'] != 'd' or to_int(pos['dup']) != slot_from:
            continue
        slot_to = int(pos['pos'])
        if slot_to in occupied:
            # Not an empty slot
            continue
        # This slot is empty, use it instead of adding a new slot at the
        # end of the worksheet
        return slot_to

    # Add a new slot at the end of the worksheet, but take into account
    # that a worksheet template is assigned, so we need to take care to
    # not override slots defined by its layout
    occupied.append(len(layout))
    slot_to = max(occupied) + 1
    return slot_to
python
{ "resource": "" }
q21316
Worksheet.get_suitable_slot_for_reference
train
def get_suitable_slot_for_reference(self, reference):
    """Returns the suitable position for reference analyses, taking into
    account if there is a WorksheetTemplate assigned to this worksheet.

    By default, returns a new slot at the end of the worksheet unless there
    is a slot defined for a reference of the same type (blank or control)
    in the worksheet template's layout that hasn't been used yet.

    :param reference: ReferenceSample the analyses will be created from
    :return: suitable slot position for reference analyses
    """
    if not IReferenceSample.providedBy(reference):
        return -1

    occupied = self.get_slot_positions(type='all') or [0]
    wst = self.getWorksheetTemplate()
    if not wst:
        # No worksheet template assigned, add a new slot at the end of the
        # worksheet with the reference analyses there
        slot_to = max(occupied) + 1
        return slot_to

    # If there is a match with the layout defined in the Worksheet
    # Template, use that slot instead of adding a new one at the end of
    # the worksheet
    slot_type = reference.getBlank() and 'b' or 'c'
    layout = wst.getLayout()
    for pos in layout:
        if pos['type'] != slot_type:
            continue
        slot_to = int(pos['pos'])
        if slot_to in occupied:
            # Not an empty slot
            continue
        # This slot is empty, use it instead of adding a new slot at the
        # end of the worksheet
        return slot_to

    # Add a new slot at the end of the worksheet, but take into account
    # that a worksheet template is assigned, so we need to take care to
    # not override slots defined by its layout
    occupied.append(len(layout))
    slot_to = max(occupied) + 1
    return slot_to
python
{ "resource": "" }
q21317
Worksheet.get_duplicates_for
train
def get_duplicates_for(self, analysis):
    """Returns the duplicates from the current worksheet that were created
    by using the analysis passed in as the source

    :param analysis: routine analysis used as the source for the duplicates
    :return: a list of duplicates generated from the analysis passed in
    """
    if not analysis:
        return list()
    uid = api.get_uid(analysis)
    return filter(lambda dup: api.get_uid(dup.getAnalysis()) == uid,
                  self.getDuplicateAnalyses())
python
{ "resource": "" }
q21318
Worksheet.get_analyses_at
train
def get_analyses_at(self, slot):
    """Returns the list of analyses assigned to the slot passed in, sorted
    by the positions they have within the slot.

    :param slot: the slot where the analyses are located
    :type slot: int
    :return: a list of analyses
    """
    # ensure we have an integer
    slot = to_int(slot)
    if slot < 1:
        return list()
    analyses = list()
    layout = self.getLayout()
    for pos in layout:
        layout_slot = to_int(pos['position'])
        uid = pos['analysis_uid']
        if layout_slot != slot or not uid:
            continue
        analyses.append(api.get_object_by_uid(uid))
    return analyses
python
{ "resource": "" }
q21319
Worksheet.get_container_at
train
def get_container_at(self, slot):
    """Returns the container object assigned to the slot passed in

    :param slot: the slot where the analyses are located
    :type slot: int
    :return: the container (analysis request, reference sample, etc.)
    """
    # ensure we have an integer
    slot = to_int(slot)
    if slot < 1:
        return None
    layout = self.getLayout()
    for pos in layout:
        layout_slot = to_int(pos['position'])
        uid = pos['container_uid']
        if layout_slot != slot or not uid:
            continue
        return api.get_object_by_uid(uid)
    return None
python
{ "resource": "" }
q21320
Worksheet.get_slot_positions
train
def get_slot_positions(self, type='a'):
    """Returns a list with the slots occupied for the type passed in.

    Allowed types of analysis are:
        'a'   (routine analysis)
        'b'   (blank analysis)
        'c'   (control)
        'd'   (duplicate)
        'all' (all analyses)

    :param type: type of the analysis
    :return: list of slot positions
    """
    if type not in ALLOWED_ANALYSES_TYPES and type != ALL_ANALYSES_TYPES:
        return list()
    layout = self.getLayout()
    slots = list()
    for pos in layout:
        if type != ALL_ANALYSES_TYPES and pos['type'] != type:
            continue
        slots.append(to_int(pos['position']))
    # return a unique list of sorted slot positions
    return sorted(set(slots))
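# A hedged usage sketch (layout values are made up): for a worksheet with a
# routine analysis in slot 1, a duplicate in slot 2 and a blank in slot 3:
#
#   ws.get_slot_positions('a')    # -> [1]
#   ws.get_slot_positions('d')    # -> [2]
#   ws.get_slot_positions('all')  # -> [1, 2, 3]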
python
{ "resource": "" }
q21321
Worksheet.get_slot_position
train
def get_slot_position(self, container, type='a'):
    """Returns the slot where the analyses from the type and container
    passed in are located within the worksheet.

    :param container: the container in which the analyses are grouped
    :param type: type of the analysis
    :return: the slot position
    :rtype: int
    """
    if not container or type not in ALLOWED_ANALYSES_TYPES:
        return None
    uid = api.get_uid(container)
    layout = self.getLayout()
    for pos in layout:
        if pos['type'] != type or pos['container_uid'] != uid:
            continue
        return to_int(pos['position'])
    return None
python
{ "resource": "" }
q21322
Worksheet.get_analysis_type
train
def get_analysis_type(self, instance):
    """Returns the string used in slots to differentiate amongst analysis
    types
    """
    if IDuplicateAnalysis.providedBy(instance):
        return 'd'
    elif IReferenceAnalysis.providedBy(instance):
        return instance.getReferenceType()
    elif IRoutineAnalysis.providedBy(instance):
        return 'a'
    return None
python
{ "resource": "" }
q21323
Worksheet.get_container_for
train
def get_container_for(self, instance):
    """Returns the container id used in slots to group analyses
    """
    if IReferenceAnalysis.providedBy(instance):
        return api.get_uid(instance.getSample())
    return instance.getRequestUID()
python
{ "resource": "" }
q21324
Worksheet.get_slot_position_for
train
def get_slot_position_for(self, instance):
    """Returns the slot where the instance passed in is located. If not
    found, returns None
    """
    uid = api.get_uid(instance)
    slot = filter(lambda s: s['analysis_uid'] == uid, self.getLayout())
    if not slot:
        return None
    return to_int(slot[0]['position'])
python
{ "resource": "" }
q21325
Worksheet.resolve_available_slots
train
def resolve_available_slots(self, worksheet_template, type='a'):
    """Returns the available slots from the current worksheet that fit with
    the layout defined in the worksheet_template and the type of analysis
    passed in.

    Allowed types of analysis are:
        'a' (routine analysis)
        'b' (blank analysis)
        'c' (control)
        'd' (duplicate)

    :param worksheet_template: the worksheet template to match against
    :param type: type of analyses the slots must suit
    :return: a list of slot positions
    """
    if not worksheet_template or type not in ALLOWED_ANALYSES_TYPES:
        return list()
    ws_slots = self.get_slot_positions(type)
    layout = worksheet_template.getLayout()
    slots = list()
    for row in layout:
        # skip rows that do not match with the given type
        if row['type'] != type:
            continue

        slot = to_int(row['pos'])
        if slot in ws_slots:
            # We only want those that are empty
            continue

        slots.append(slot)
    return slots
python
{ "resource": "" }
q21326
Worksheet.applyWorksheetTemplate
train
def applyWorksheetTemplate(self, wst):
    """Add analyses to the worksheet according to wst's layout.

    Will not overwrite slots which are filled already. If the selected
    template has an instrument assigned, it will only be applied to those
    analyses for which the instrument is allowed; the same happens with
    methods.
    """
    # Store the Worksheet Template field
    self.getField('WorksheetTemplate').set(self, wst)

    if not wst:
        return

    # Apply the template for routine analyses
    self._apply_worksheet_template_routine_analyses(wst)

    # Apply the template for duplicate analyses
    self._apply_worksheet_template_duplicate_analyses(wst)

    # Apply the template for reference analyses (blanks and controls)
    self._apply_worksheet_template_reference_analyses(wst)

    # Assign the instrument
    instrument = wst.getInstrument()
    if instrument:
        self.setInstrument(instrument, True)

    # Assign the method
    method = wst.getRestrictToMethod()
    if method:
        self.setMethod(method, True)
python
{ "resource": "" }
q21327
Worksheet.getWorksheetServices
train
def getWorksheetServices(self):
    """get list of analysis services present on this worksheet
    """
    services = []
    for analysis in self.getAnalyses():
        service = analysis.getAnalysisService()
        if service and service not in services:
            services.append(service)
    return services
python
{ "resource": "" }
q21328
Worksheet.setInstrument
train
def setInstrument(self, instrument, override_analyses=False):
    """Sets the specified instrument to the Analyses from the Worksheet.

    Only sets the instrument if the Analysis allows it, according to its
    Analysis Service and Method. If an analysis already has an instrument
    assigned, it won't be overridden. The analyses that don't allow the
    specified instrument will not be modified.

    Returns the number of analyses affected
    """
    analyses = [an for an in self.getAnalyses()
                if (not an.getInstrument() or override_analyses)
                and an.isInstrumentAllowed(instrument)]
    total = 0
    for an in analyses:
        # An analysis can be done using different methods. A method can be
        # supported by more than one instrument, but not all instruments
        # support every method. We must force the instrument's method to be
        # set too. Otherwise, the WS manage results view would display the
        # analysis' default method and its instruments, showing only the
        # instruments for the default method in the picklist.
        instr_methods = instrument.getMethods()
        meth = instr_methods[0] if instr_methods else None
        if meth and an.isMethodAllowed(meth):
            if an.getMethod() not in instr_methods:
                an.setMethod(meth)
        an.setInstrument(instrument)
        total += 1

    self.getField('Instrument').set(self, instrument)
    return total
python
{ "resource": "" }
q21329
Worksheet.setMethod
train
def setMethod(self, method, override_analyses=False):
    """Sets the specified method to the Analyses from the Worksheet.

    Only sets the method if the Analysis allows it, to keep the integrity.
    If an analysis already has a method assigned, it won't be overridden.

    Returns the number of analyses affected.
    """
    analyses = [an for an in self.getAnalyses()
                if (not an.getMethod() or
                    not an.getInstrument() or
                    override_analyses) and an.isMethodAllowed(method)]
    total = 0
    for an in analyses:
        success = False
        if an.isMethodAllowed(method):
            success = an.setMethod(method)
        if success is True:
            total += 1

    self.getField('Method').set(self, method)
    return total
python
{ "resource": "" }
q21330
Worksheet.checkUserManage
train
def checkUserManage(self):
    """Checks if the current user has been granted access to this worksheet
    and also has privileges for managing it.
    """
    granted = False
    can_access = self.checkUserAccess()

    if can_access is True:
        pm = getToolByName(self, 'portal_membership')
        edit_allowed = pm.checkPermission(EditWorksheet, self)
        if edit_allowed:
            # Check if the current user is the WS's current analyst
            member = pm.getAuthenticatedMember()
            analyst = self.getAnalyst().strip()
            if analyst != _c(member.getId()):
                # Does the user have management privileges?
                if pm.checkPermission(ManageWorksheets, self):
                    granted = True
            else:
                granted = True

    return granted
python
{ "resource": "" }
q21331
Worksheet.checkUserAccess
train
def checkUserAccess(self):
    """Checks if the current user has been granted access to this worksheet.
    Returns False if the user has no access, otherwise returns True
    """
    # Deny access to foreign analysts
    allowed = True
    pm = getToolByName(self, "portal_membership")
    member = pm.getAuthenticatedMember()

    analyst = self.getAnalyst().strip()
    if analyst != _c(member.getId()):
        roles = member.getRoles()
        restrict = 'Manager' not in roles \
            and 'LabManager' not in roles \
            and 'LabClerk' not in roles \
            and 'RegulatoryInspector' not in roles \
            and self.bika_setup.getRestrictWorksheetUsersAccess()
        allowed = not restrict

    return allowed
python
{ "resource": "" }
q21332
Worksheet.getProgressPercentage
train
def getProgressPercentage(self):
    """Returns the progress percentage of this worksheet
    """
    state = api.get_workflow_status_of(self)
    if state == "verified":
        return 100

    steps = 0
    query = dict(getWorksheetUID=api.get_uid(self))
    analyses = api.search(query, CATALOG_ANALYSIS_LISTING)
    max_steps = len(analyses) * 2
    for analysis in analyses:
        an_state = analysis.review_state
        if an_state in ["rejected", "retracted", "cancelled"]:
            steps += 2
        elif an_state in ["verified", "published"]:
            steps += 2
        elif an_state == "to_be_verified":
            steps += 1
    if steps == 0:
        return 0
    if steps > max_steps:
        return 100
    return (steps * 100) / max_steps
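# A hedged worked example (states are made up): for a worksheet with 3
# analyses (max_steps = 6), one "verified" (+2), one "to_be_verified" (+1)
# and one "assigned" (+0), steps = 3 and the method returns
# (3 * 100) / 6 = 50.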
python
{ "resource": "" }
q21333
PartitionSetupWidget.process_form
train
def process_form(self, instance, field, form, empty_marker=None,
                 emptyReturnsMarker=False):
    """Some special field handling for disabled fields, which don't get
    submitted by the browser but still need to be written away.
    """
    bsc = getToolByName(instance, 'bika_setup_catalog')
    default = super(PartitionSetupWidget, self).process_form(
        instance, field, form, empty_marker, emptyReturnsMarker)
    if not default:
        return [], {}
    value = default[0]
    kwargs = len(default) > 1 and default[1] or {}
    newvalue = []
    for v in value:
        v = dict(v)
        if v.get('separate', '') == 'on' and 'preservation' not in v:
            container_uid = v.get('container', [''])[0]
            if container_uid:
                container = bsc(UID=container_uid)[0].getObject()
                if container.getPrePreserved():
                    pres = container.getPreservation()
                    if pres:
                        v['preservation'] = [pres.UID()]
        newvalue.append(v)
    return newvalue, kwargs
python
{ "resource": "" }
q21334
guard_create_partitions
train
def guard_create_partitions(analysis_request):
    """Returns true if partitions can be created using the analysis request
    passed in as the source.
    """
    if not analysis_request.bika_setup.getShowPartitions():
        # If partitions are disabled in Setup, return False
        return False

    if analysis_request.isPartition():
        # Do not allow the creation of partitions from partitions
        return False

    # Allow only the creation of partitions if all analyses from the
    # Analysis Request are in unassigned state. Otherwise, we could end up
    # with inconsistencies, because original analyses are deleted when the
    # partition is created. Note here we exclude analyses from children
    # (partitions).
    analyses = analysis_request.objectValues("Analysis")
    for analysis in analyses:
        if api.get_workflow_status_of(analysis) != "unassigned":
            return False
    return analyses and True or False
python
{ "resource": "" }
q21335
guard_submit
train
def guard_submit(analysis_request):
    """Return whether the transition "submit" can be performed or not.

    Returns True if there is at least one analysis in a non-detached state
    and all analyses in a non-detached state have been submitted.
    """
    analyses_ready = False
    for analysis in analysis_request.getAnalyses():
        analysis = api.get_object(analysis)
        analysis_status = api.get_workflow_status_of(analysis)
        if analysis_status in ANALYSIS_DETACHED_STATES:
            continue
        if analysis_status in ['assigned', 'unassigned', 'registered']:
            return False
        analyses_ready = True
    return analyses_ready
python
{ "resource": "" }
q21336
guard_prepublish
train
def guard_prepublish(analysis_request):
    """Returns whether the 'prepublish' transition can be performed or not.

    Returns True if the analysis request has at least one analysis in
    'verified' or 'to_be_verified' status. Otherwise, returns False
    """
    valid_states = ['verified', 'to_be_verified']
    for analysis in analysis_request.getAnalyses():
        analysis = api.get_object(analysis)
        if api.get_workflow_status_of(analysis) in valid_states:
            return True
    return False
python
{ "resource": "" }
q21337
guard_rollback_to_receive
train
def guard_rollback_to_receive(analysis_request):
    """Return whether the 'rollback_to_receive' transition can be performed
    or not.

    Returns True if the analysis request has at least one analysis in
    'assigned' or 'unassigned' status. Otherwise, returns False
    """
    skipped = 0
    valid_states = ['unassigned', 'assigned']
    skip_states = ['retracted', 'rejected']
    analyses = analysis_request.getAnalyses()
    for analysis in analyses:
        analysis = api.get_object(analysis)
        status = api.get_workflow_status_of(analysis)
        if status in valid_states:
            return True
        elif status in skip_states:
            skipped += 1
    return len(analyses) == skipped
python
{ "resource": "" }
q21338
guard_cancel
train
def guard_cancel(analysis_request):
    """Returns whether the 'cancel' transition can be performed or not.
    Returns True only if all analyses are in "unassigned" status
    """
    # Ask the partitions
    for partition in analysis_request.getDescendants(all_descendants=False):
        if not isTransitionAllowed(partition, "cancel"):
            return False

    # Look through the analyses. We've checked the partitions already, so
    # there is no need to look through analyses from partitions again, but
    # only through the analyses directly bound to the current Analysis
    # Request.
    cancellable_states = ["unassigned", "registered"]
    for analysis in analysis_request.objectValues("Analysis"):
        if api.get_workflow_status_of(analysis) not in cancellable_states:
            return False

    return True
python
{ "resource": "" }
q21339
guard_reinstate
train
def guard_reinstate(analysis_request):
    """Returns whether the 'reinstate' transition can be performed or not.
    Returns True only if this is not a partition, or the parent analysis
    request is not in a cancelled state or can be reinstated
    """
    parent = analysis_request.getParentAnalysisRequest()
    if not parent:
        return True
    if api.get_workflow_status_of(parent) != "cancelled":
        return True
    return isTransitionAllowed(parent, "reinstate")
python
{ "resource": "" }
q21340
guard_sample
train
def guard_sample(analysis_request):
    """Returns whether the 'sample' transition can be performed or not.
    Returns True only if the analysis request has the DateSampled and
    Sampler set, or if the user belongs to the Samplers group
    """
    if analysis_request.getDateSampled() and analysis_request.getSampler():
        return True

    current_user = api.get_current_user()
    return "Sampler" in current_user.getRolesInContext(analysis_request)
python
{ "resource": "" }
q21341
assigned_state
train
def assigned_state(instance):
    """Returns `assigned` or `unassigned` depending on the state of the
    analyses the analysisrequest contains.

    Return `unassigned` if the Analysis Request has at least one analysis
    in `unassigned` state. Otherwise, returns `assigned`
    """
    analyses = instance.getAnalyses()
    if not analyses:
        return "unassigned"
    for analysis in analyses:
        analysis_object = api.get_object(analysis)
        if not analysis_object.getWorksheet():
            return "unassigned"
    return "assigned"
python
{ "resource": "" }
q21342
handle_errors
train
def handle_errors(f):
    """ simple JSON error handler
    """
    import traceback
    from plone.jsonapi.core.helpers import error

    def decorator(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            var = traceback.format_exc()
            return error(var)

    return decorator
python
{ "resource": "" }
q21343
get_include_fields
train
def get_include_fields(request):
    """Retrieve include_fields values from the request
    """
    include_fields = []
    rif = request.get("include_fields", "")
    if "include_fields" in request:
        include_fields = [x.strip() for x in rif.split(",") if x.strip()]
    if "include_fields[]" in request:
        include_fields = request['include_fields[]']
    return include_fields
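# A hedged usage sketch (query string is made up): a request like
# ?include_fields=title,description yields ["title", "description"], while
# an include_fields[] form parameter is passed through as-is.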
python
{ "resource": "" }
q21344
load_brain_metadata
train
def load_brain_metadata(proxy, include_fields):
    """Load values from the catalog metadata into a dictionary
    """
    ret = {}
    for index in proxy.indexes():
        if index not in proxy:
            continue
        if include_fields and index not in include_fields:
            continue
        val = getattr(proxy, index)
        if val != Missing.Value:
            try:
                json.dumps(val)
            except:
                continue
            ret[index] = val
    return ret
python
{ "resource": "" }
q21345
load_field_values
train
def load_field_values(instance, include_fields):
    """Load values from an AT object's schema fields into a dictionary
    """
    ret = {}
    schema = instance.Schema()
    val = None
    for field in schema.fields():
        fieldname = field.getName()
        if include_fields and fieldname not in include_fields:
            continue
        try:
            val = field.get(instance)
        except AttributeError:
            # If this error is raised, take a look at the add-on content
            # expressions used to obtain their data.
            print "AttributeError:", sys.exc_info()[1]
            print "Unreachable object. Maybe the object comes from an Add-on"
            print traceback.format_exc()

        if val:
            field_type = field.type
            # If it is a proxy field, we need the type of the proxied field
            if field_type == 'proxy':
                actual_field = field.get_proxy(instance)
                field_type = actual_field.type
            if field_type == "blob" or field_type == 'file':
                continue
            # I put the UID of all references here in *_uid.
            if field_type == 'reference':
                if type(val) in (list, tuple):
                    ret[fieldname + "_uid"] = [v.UID() for v in val]
                    val = [to_utf8(v.Title()) for v in val]
                else:
                    ret[fieldname + "_uid"] = val.UID()
                    val = to_utf8(val.Title())
            elif field_type == 'boolean':
                val = True if val else False
            elif field_type == 'text':
                val = to_utf8(val)

        try:
            json.dumps(val)
        except:
            val = str(val)
        ret[fieldname] = val
    return ret
python
{ "resource": "" }
q21346
get_include_methods
train
def get_include_methods(request):
    """Retrieve include_methods values from the request
    """
    methods = request.get("include_methods", "")
    include_methods = [x.strip() for x in methods.split(",") if x.strip()]
    return include_methods
python
{ "resource": "" }
q21347
set_fields_from_request
train
def set_fields_from_request(obj, request):
    """Search the request for keys that match field names in obj, and call
    the field mutator with the request value.

    The list of fields for which schema mutators were found is returned.
    """
    schema = obj.Schema()
    # fields contains all schema-valid field values from the request.
    fields = {}
    for fieldname, value in request.items():
        if fieldname not in schema:
            continue
        field = schema[fieldname]
        widget = field.getWidgetName()
        if widget in ["ReferenceWidget"]:
            brains = []
            if value:
                brains = resolve_request_lookup(obj, request, fieldname)
                if not brains:
                    logger.warning(
                        "JSONAPI: Can't resolve reference: {} {}"
                        .format(fieldname, value))
                    return []
            if schema[fieldname].multiValued:
                value = [b.UID for b in brains] if brains else []
            else:
                value = brains[0].UID if brains else None
        fields[fieldname] = value

    # Write fields.
    for fieldname, value in fields.items():
        field = schema[fieldname]
        fieldtype = field.getType()
        if fieldtype == 'Products.Archetypes.Field.BooleanField':
            if not value or value.lower() in ('0', 'false', 'no'):
                value = False
            else:
                value = True
        elif fieldtype in ['Products.ATExtensions.field.records.RecordsField',
                           'Products.ATExtensions.field.records.RecordField']:
            try:
                value = eval(value)
            except:
                logger.warning(
                    "JSONAPI: " + fieldname + ": Invalid "
                    "JSON/Python variable")
                return []
        mutator = field.getMutator(obj)
        if mutator:
            mutator(value)
        else:
            field.set(obj, value)

    obj.reindexObject()
    return fields.keys()
python
{ "resource": "" }
q21348
RejectionWidget.isVisible
train
def isVisible(self, instance, mode='view', default=None, field=None):
    """This function returns the visibility of the widget depending on
    whether the rejection workflow is enabled or not.
    """
    vis = super(RejectionWidget, self).isVisible(
        instance=instance, mode=mode, default=default, field=field)
    if instance.bika_setup.isRejectionWorkflowEnabled():
        return vis
    else:
        return 'invisible'
python
{ "resource": "" }
q21349
RejectionWidget.rejectionOptionsList
train
def rejectionOptionsList(self):
    """Return a sorted list with the options defined in bikasetup
    """
    plone = getSite()
    settings = plone.bika_setup
    # RejectionReasons will return something like:
    # [{'checkbox': u'on', 'textfield-2': u'b',
    #   'textfield-1': u'c', 'textfield-0': u'a'}]
    if len(settings.RejectionReasons) > 0:
        reject_reasons = settings.RejectionReasons[0]
    else:
        return []
    sorted_keys = sorted(reject_reasons.keys())
    if 'checkbox' in sorted_keys:
        sorted_keys.remove('checkbox')
    # Building the list with the values only because the keys are not
    # needed any more
    items = []
    for key in sorted_keys:
        items.append(reject_reasons[key].strip())
    return items
python
{ "resource": "" }
q21350
Import
train
def Import(context, request):
    """ Read Dimensional-CSV analysis results
    """
    form = request.form
    # TODO form['file'] sometimes returns a list
    infile = form['instrument_results_file'][0] if \
        isinstance(form['instrument_results_file'], list) else \
        form['instrument_results_file']
    artoapply = form['artoapply']
    override = form['results_override']
    instrument = form.get('instrument', None)
    errors = []
    logs = []

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    parser = TwoDimensionCSVParser(infile)
    status = get_instrument_import_ar_allowed_states(artoapply)
    over = get_instrument_import_override(override)
    importer = TwoDimensionImporter(parser=parser,
                                    context=context,
                                    allowed_ar_states=status,
                                    allowed_analysis_states=None,
                                    override=over,
                                    instrument_uid=instrument,
                                    form=form)
    tbex = ''
    try:
        importer.process()
    except:
        tbex = traceback.format_exc()
    errors = importer.errors
    logs = importer.logs
    warns = importer.warns
    if tbex:
        errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)
python
{ "resource": "" }
q21351
guard_unassign
train
def guard_unassign(duplicate_analysis):
    """Return whether the transition 'unassign' can be performed or not
    """
    analysis = duplicate_analysis.getAnalysis()
    if wf.isTransitionAllowed(analysis, "unassign"):
        return True

    skip = ["retracted", "rejected", "unassigned"]
    if api.get_review_status(analysis) in skip:
        return True

    return analysis_guards.guard_unassign(duplicate_analysis)
python
{ "resource": "" }
q21352
ReflexRuleWidget._get_sorted_cond_keys
train
def _get_sorted_cond_keys(self, keys_list):
    """This function returns only the elements starting with
    'analysisservice-' in 'keys_list'. The returned list is sorted by the
    index appended to the end of each element.
    """
    # The names can be found in reflexrulewidget.pt inside the
    # conditionscontainer div.
    cond_list = []
    for key in keys_list:
        if key.startswith('analysisservice-'):
            cond_list.append(key)
    cond_list.sort()
    return cond_list
python
{ "resource": "" }
q21353
ReflexRuleWidget._get_sorted_action_keys
train
def _get_sorted_action_keys(self, keys_list):
    """This function returns only the elements starting with 'action-' in
    'keys_list'. The returned list is sorted by the index appended to the
    end of each element.
    """
    # The names can be found in reflexrulewidget.pt inside the
    # Reflex action rules list section.
    action_list = []
    for key in keys_list:
        if key.startswith('action-'):
            action_list.append(key)
    action_list.sort()
    return action_list
python
{ "resource": "" }
q21354
SamplingRoundAddedEventHandler
train
def SamplingRoundAddedEventHandler(instance, event):
    """Event fired when a Sampling Round object gets created. Since Sampling
    Round is a dexterity object, we have to change the ID by "hand". Then we
    have to redirect the user to the AR add form.
    """
    if instance.portal_type != "SamplingRound":
        print("How does this happen: type is %s should be SamplingRound"
              % instance.portal_type)
        return
    renameAfterCreation(instance)
    num_art = len(instance.ar_templates)
    destination_url = instance.aq_parent.absolute_url() + \
        "/portal_factory/" + \
        "AnalysisRequest/Request new analyses/ar_add?samplinground=" + \
        instance.UID() + "&ar_count=" + str(num_art)
    request = getattr(instance, 'REQUEST', None)
    request.response.redirect(destination_url)
python
{ "resource": "" }
q21355
AnalysisProfileAnalysesWidget.process_form
train
def process_form(self, instance, field, form, empty_marker=None,
                 emptyReturnsMarker=False):
    """Return UIDs of the selected services for the AnalysisProfile
    reference field
    """
    # selected services
    service_uids = form.get("uids", [])
    # hidden services
    hidden_services = form.get("Hidden", {})

    # get the service objects
    services = map(api.get_object_by_uid, service_uids)
    # get dependencies
    dependencies = map(lambda s: s.getServiceDependencies(), services)
    dependencies = list(itertools.chain.from_iterable(dependencies))

    # Merge dependencies and services
    services = set(services + dependencies)

    as_settings = []
    for service in services:
        service_uid = api.get_uid(service)
        hidden = hidden_services.get(service_uid, "") == "on"
        as_settings.append({"uid": service_uid, "hidden": hidden})

    # set the analysis services settings
    instance.setAnalysisServicesSettings(as_settings)

    return map(api.get_uid, services), {}
python
{ "resource": "" }
q21356
InvoiceView.format_price
train
def format_price(self, price):
    """Formats the price with the set decimal mark and currency
    """
    # ensure we have a float
    price = api.to_float(price, default=0.0)
    dm = self.get_decimal_mark()
    cur = self.get_currency_symbol()
    price = "%s %.2f" % (cur, price)
    return price.replace(".", dm)
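# A hedged worked example (decimal mark and currency are assumed setup
# values): with decimal mark "," and currency symbol "EUR",
# format_price(1234.5) renders "EUR 1234.50" and returns "EUR 1234,50".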
python
{ "resource": "" }
q21357
InvoiceView.get_billable_items
train
def get_billable_items(self):
    """Return a list of billable items
    """
    items = []
    for obj in self.context.getBillableItems():
        if self.is_profile(obj):
            items.append({
                "obj": obj,
                "title": obj.Title(),
                "vat": obj.getAnalysisProfileVAT(),
                "price": self.format_price(obj.getAnalysisProfilePrice()),
            })
        if self.is_analysis(obj):
            items.append({
                "obj": obj,
                "title": obj.Title(),
                "vat": obj.getVAT(),
                "price": self.format_price(obj.getPrice()),
            })
    return items
python
{ "resource": "" }
q21358
InstrumentCertification.setValidTo
train
def setValidTo(self, value):
    """Custom setter method to calculate a `ValidTo` date based on the
    `ValidFrom` and `ExpirationInterval` field values.
    """
    valid_from = self.getValidFrom()
    valid_to = DateTime(value)
    interval = self.getExpirationInterval()

    if valid_from and interval:
        valid_to = valid_from + int(interval)
        self.getField("ValidTo").set(self, valid_to)
        logger.debug("Set ValidTo Date to: %r" % valid_to)
    else:
        # just set the value
        self.getField("ValidTo").set(self, valid_to)
python
{ "resource": "" }
q21359
InstrumentCertification.getInterval
train
def getInterval(self):
    """Vocabulary of date intervals to calculate the "To" field date based
    on the "From" field date.
    """
    items = (
        ("", _(u"Not set")),
        ("1", _(u"daily")),
        ("7", _(u"weekly")),
        ("30", _(u"monthly")),
        ("90", _(u"quarterly")),
        ("180", _(u"biannually")),
        ("365", _(u"yearly")),
    )
    return DisplayList(items)
python
{ "resource": "" }
q21360
InstrumentCertification.isValid
train
def isValid(self):
    """Returns if the current certificate is in a valid date range
    """
    today = DateTime()
    valid_from = self.getValidFrom()
    valid_to = self.getValidTo()
    return valid_from <= today <= valid_to
python
{ "resource": "" }
q21361
InstrumentCertification.getDaysToExpire
train
def getDaysToExpire(self):
    """Returns the days until this certificate expires

    :returns: Days until the certificate expires
    :rtype: int
    """
    delta = 0
    today = DateTime()
    valid_from = self.getValidFrom() or today
    valid_to = self.getValidTo()
    # one of the fields is not set, return 0 days
    if not valid_from or not valid_to:
        return 0
    # valid_from comes after valid_to?
    if valid_from > valid_to:
        return 0
    # calculate the time between today and valid_to, even if valid_from
    # is in the future.
    else:
        delta = valid_to - today
    return int(math.ceil(delta))
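# A hedged worked example (dates are made up): with ValidFrom unset and
# ValidTo 10.5 days from now, valid_to - today is a float number of days
# and math.ceil rounds it up, so the method returns 11.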
python
{ "resource": "" }
q21362
get_workflows
train
def get_workflows():
    """Returns a mapping of id->workflow
    """
    wftool = api.get_tool("portal_workflow")
    wfs = {}
    for wfid in wftool.objectIds():
        wf = wftool.getWorkflowById(wfid)
        if hasattr(aq_base(wf), "updateRoleMappingsFor"):
            wfs[wfid] = wf
    return wfs
python
{ "resource": "" }
q21363
update_role_mappings
train
def update_role_mappings(obj, wfs=None, reindex=True):
    """Update the role mappings of the given object
    """
    wftool = api.get_tool("portal_workflow")
    if wfs is None:
        wfs = get_workflows()
    chain = wftool.getChainFor(obj)
    for wfid in chain:
        wf = wfs[wfid]
        wf.updateRoleMappingsFor(obj)
    if reindex is True:
        obj.reindexObject(idxs=["allowedRolesAndUsers"])
    return obj
python
{ "resource": "" }
q21364
fix_client_permissions
train
def fix_client_permissions(portal):
    """Fix client permissions
    """
    wfs = get_workflows()

    start = time.time()
    clients = portal.clients.objectValues()
    total = len(clients)
    for num, client in enumerate(clients):
        logger.info("Fixing permission for client {}/{} ({})"
                    .format(num, total, client.getName()))
        update_role_mappings(client, wfs=wfs)
    end = time.time()
    logger.info("Fixing client permissions took %.2fs" % float(end - start))
    transaction.commit()
python
{ "resource": "" }
q21365
before_sample
train
def before_sample(analysis_request):
    """Method triggered before the "sample" transition for the Analysis
    Request passed in is performed
    """
    if not analysis_request.getDateSampled():
        analysis_request.setDateSampled(DateTime())
    if not analysis_request.getSampler():
        analysis_request.setSampler(api.get_current_user().id)
python
{ "resource": "" }
q21366
after_reinstate
train
def after_reinstate(analysis_request):
    """Method triggered after a 'reinstate' transition for the Analysis
    Request passed in is performed. Sets its status to the last status
    before it was cancelled. Reinstates the descendant partitions and all
    the analyses associated to the analysis request as well.
    """
    do_action_to_descendants(analysis_request, "reinstate")
    do_action_to_analyses(analysis_request, "reinstate")

    # Force the transition to the previous state before the request was
    # cancelled
    prev_status = get_prev_status_from_history(analysis_request, "cancelled")
    changeWorkflowState(analysis_request, AR_WORKFLOW_ID, prev_status,
                        action="reinstate")
    analysis_request.reindexObject()
python
{ "resource": "" }
q21367
after_receive
train
def after_receive(analysis_request):
    """Method triggered after the "receive" transition for the Analysis
    Request passed in is performed
    """
    # Mark this analysis request as IReceived
    alsoProvides(analysis_request, IReceived)

    analysis_request.setDateReceived(DateTime())
    do_action_to_analyses(analysis_request, "initialize")
python
{ "resource": "" }
q21368
after_sample
train
def after_sample(analysis_request):
    """Method triggered after the "sample" transition for the Analysis
    Request passed in is performed
    """
    analysis_request.setDateSampled(DateTime())
    idxs = ['getDateSampled']
    for analysis in analysis_request.getAnalyses(full_objects=True):
        analysis.reindexObject(idxs=idxs)
python
{ "resource": "" }
q21369
AbstractRoutineAnalysis.getClientTitle
train
def getClientTitle(self):
    """Used to populate catalog values.
    Returns the Title of the client for this analysis' AR.
    """
    request = self.getRequest()
    if request:
        client = request.getClient()
        if client:
            return client.Title()
python
{ "resource": "" }
q21370
AbstractRoutineAnalysis.getClientUID
train
def getClientUID(self):
    """Used to populate catalog values.
    Returns the UID of the client for this analysis' AR.
    """
    request = self.getRequest()
    if request:
        client = request.getClient()
        if client:
            return client.UID()
python
{ "resource": "" }
q21371
AbstractRoutineAnalysis.getClientURL
train
def getClientURL(self):
    """This method is used to populate catalog values.
    Returns the URL of the client for this analysis' AR.
    """
    request = self.getRequest()
    if request:
        client = request.getClient()
        if client:
            return client.absolute_url_path()
python
{ "resource": "" }
q21372
AbstractRoutineAnalysis.getDateReceived
train
def getDateReceived(self):
    """Used to populate catalog values.
    Returns the date the Analysis Request this analysis belongs to was
    received. If the analysis was created after, then returns the date
    the analysis was created.
    """
    request = self.getRequest()
    if request:
        ar_date = request.getDateReceived()
        if ar_date and self.created() > ar_date:
            return self.created()
        return ar_date
    return None
python
{ "resource": "" }
q21373
AbstractRoutineAnalysis.getDueDate
train
def getDueDate(self):
    """Used to populate the getDueDate index and metadata.
    This calculates the difference between the time the analysis processing
    started and the maximum turnaround time. If the analysis has no
    turnaround time set or is not yet ready for processing, returns None
    """
    tat = self.getMaxTimeAllowed()
    if not tat:
        return None
    start = self.getStartProcessDate()
    if not start:
        return None

    # delta time when the first analysis is considered as late
    delta = timedelta(minutes=api.to_minutes(**tat))

    # calculated due date
    end = dt2DT(DT2dt(start) + delta)

    # delta is within one day, return immediately
    if delta.days == 0:
        return end

    # get the laboratory workdays
    setup = api.get_setup()
    workdays = setup.getWorkdays()

    # every day is a workday, no need for calculation
    if workdays == tuple(map(str, range(7))):
        return end

    # reset the due date to the received date, and add one day only for
    # each configured workday
    due_date = end - delta.days

    days = 0
    while days < delta.days:
        # add one day to the new due date
        due_date += 1

        # skip if the weekday is a non working day
        if str(due_date.asdatetime().weekday()) not in workdays:
            continue

        days += 1

    return due_date
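# A hedged worked example (dates and setup values are made up): with a
# turnaround time of 2 days, a start date on Friday and workdays set to
# Mon-Fri only, the loop skips Saturday and Sunday, so the due date lands
# on the following Tuesday instead of Sunday.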
python
{ "resource": "" }
q21374
AbstractRoutineAnalysis.getResultsRange
train
def getResultsRange(self):
    """Returns the valid result range for this routine analysis based on
    the results ranges defined in the Analysis Request this routine
    analysis is assigned to.

    A routine analysis will be considered out of range if its result falls
    out of the range defined in "min" and "max". If there are values set
    for "warn_min" and "warn_max", these are used to compute the shoulders
    in both ends of the range. Thus, an analysis can be out of range, but
    still be within shoulders.

    :return: A dictionary with keys "min", "max", "warn_min" and "warn_max"
    :rtype: dict
    """
    specs = ResultsRangeDict()
    analysis_request = self.getRequest()
    if not analysis_request:
        return specs

    keyword = self.getKeyword()
    ar_ranges = analysis_request.getResultsRange()

    # Get the result range that corresponds to this specific analysis
    an_range = [rr for rr in ar_ranges if rr.get('keyword', '') == keyword]
    return an_range and an_range[0] or specs
python
{ "resource": "" }
q21375
AbstractRoutineAnalysis.getHidden
train
def getHidden(self):
    """Returns whether the analysis must be displayed in results reports or
    not, as well as in the analyses view when the user logged in is a
    Client Contact.

    If the value for the field HiddenManually is set to False, this
    function will delegate the action to the method
    getAnalysisServiceSettings() from the Analysis Request.

    If the value for the field HiddenManually is set to True, this
    function will return the value of the field Hidden.

    :return: true or false
    :rtype: bool
    """
    if self.getHiddenManually():
        return self.getField('Hidden').get(self)
    request = self.getRequest()
    if request:
        service_uid = self.getServiceUID()
        ar_settings = request.getAnalysisServiceSettings(service_uid)
        return ar_settings.get('hidden', False)
    return False
python
{ "resource": "" }
q21376
AbstractRoutineAnalysis.setHidden
train
def setHidden(self, hidden):
    """Sets whether this analysis must be displayed or not in results
    reports, and in the manage analyses view if the user is a lab
    contact as well.

    The value set by using this field will have priority over the
    visibility criteria set at Analysis Request, Template or Profile
    levels (see field AnalysisServiceSettings from Analysis Request).
    To achieve this behavior, this setter also sets the value of
    HiddenManually to true.
    :param hidden: true if the analysis must be hidden in report
    :type hidden: bool
    """
    self.setHiddenManually(True)
    self.getField('Hidden').set(self, hidden)
python
{ "resource": "" }
q21377
LabProduct.getTotalPrice
train
def getTotalPrice(self):
    """Compute total price
    """
    price = self.getPrice()
    price = Decimal(price or '0.00')
    vat = Decimal(self.getVAT())
    vat = vat and vat / 100 or 0
    price = price + (price * vat)
    return price.quantize(Decimal('0.00'))
python
{ "resource": "" }
q21378
SupplyOrder.getProductUIDs
train
def getProductUIDs(self):
    """Return the UIDs of the products referenced by order items
    """
    uids = []
    for orderitem in self.objectValues('SupplyOrderItem'):
        product = orderitem.getProduct()
        if product is not None:
            uids.append(product.UID())
    return uids
python
{ "resource": "" }
q21379
ReferenceWidget.process_form
train
def process_form(self, instance, field, form, empty_marker=None,
                 emptyReturnsMarker=False):
    """Return a UID so that ReferenceField understands.
    """
    fieldName = field.getName()
    if fieldName + "_uid" in form:
        uid = form.get(fieldName + "_uid", '')
        if field.multiValued and isinstance(uid, (str, unicode)):
            uid = uid.split(",")
    elif fieldName in form:
        uid = form.get(fieldName, '')
        if field.multiValued and isinstance(uid, (str, unicode)):
            uid = uid.split(",")
    else:
        uid = None
    return uid, {}
python
{ "resource": "" }
q21380
ajaxReferenceWidgetSearch.get_field_names
train
def get_field_names(self):
    """Return the field names to get values for
    """
    col_model = self.request.get("colModel", None)
    if not col_model:
        return ["UID"]

    names = []
    col_model = json.loads(_u(col_model))
    if isinstance(col_model, (list, tuple)):
        names = map(lambda c: c.get("columnName", "").strip(), col_model)

    # UID is used by reference widget to know the object that the user
    # selected from the popup list
    if "UID" not in names:
        names.append("UID")

    return filter(None, names)
python
{ "resource": "" }
q21381
ajaxReferenceWidgetSearch.get_data_record
train
def get_data_record(self, brain, field_names):
    """Returns a dict with the column values for the given brain
    """
    record = {}
    model = None

    for field_name in field_names:
        # First try to get the value directly from the brain
        value = getattr(brain, field_name, None)

        # No metadata for this column name
        if value is None:
            logger.warn("Not a metadata field: {}".format(field_name))
            model = model or SuperModel(brain)
            value = model.get(field_name, None)
            if callable(value):
                value = value()

        # '&nbsp;' instead of '' because empty div fields don't render
        # correctly in combo results table
        record[field_name] = value or "&nbsp;"

    return record
python
{ "resource": "" }
q21382
ajaxReferenceWidgetSearch.search
train
def search(self):
    """Returns the list of brains that match with the request criteria
    """
    brains = []
    # TODO Legacy
    for name, adapter in getAdapters((self.context, self.request),
                                     IReferenceWidgetVocabulary):
        brains.extend(adapter())
    return brains
python
{ "resource": "" }
q21383
ajaxReferenceWidgetSearch.to_data_rows
train
def to_data_rows(self, brains):
    """Returns a list of dictionaries representing the values of each brain
    """
    fields = self.get_field_names()
    return map(lambda brain: self.get_data_record(brain, fields), brains)
python
{ "resource": "" }
q21384
ajaxReferenceWidgetSearch.to_json_payload
train
def to_json_payload(self, data_rows):
    """Returns the json payload
    """
    num_rows = len(data_rows)
    num_page = self.num_page
    num_rows_page = self.num_rows_page

    pages = num_rows / num_rows_page
    pages += divmod(num_rows, num_rows_page)[1] and 1 or 0

    start = (num_page - 1) * num_rows_page
    end = num_page * num_rows_page

    payload = {"page": num_page,
               "total": pages,
               "records": num_rows,
               "rows": data_rows[start:end]}
    return json.dumps(payload)
python
{ "resource": "" }
q21385
rename_retract_ar_transition
train
def rename_retract_ar_transition(portal):
    """Renames retract_ar transition to invalidate
    """
    logger.info("Renaming 'retract_ar' transition to 'invalidate'")
    wf_tool = api.get_tool("portal_workflow")
    workflow = wf_tool.getWorkflowById("bika_ar_workflow")

    if "invalidate" not in workflow.transitions:
        workflow.transitions.addTransition("invalidate")
    transition = workflow.transitions.invalidate
    transition.setProperties(
        title="Invalidate",
        new_state_id="invalid",
        after_script_name="",
        actbox_name="Invalidate",
    )
    guard = transition.guard or Guard()
    guard_props = {"guard_permissions": "BIKA: Retract",
                   "guard_roles": "",
                   "guard_expr": "python:here.guard_cancelled_object()"}
    guard.changeFromProperties(guard_props)
    transition.guard = guard

    for state in workflow.states.values():
        if 'retract_ar' in state.transitions:
            trans = filter(lambda id: id != 'retract_ar', state.transitions)
            trans += ('invalidate', )
            state.transitions = trans

    if "retract_ar" in workflow.transitions:
        workflow.transitions.deleteTransitions(["retract_ar"])
python
{ "resource": "" }
q21386
rebind_invalidated_ars
train
def rebind_invalidated_ars(portal):
    """Rebind the ARs automatically generated because of the retraction of
    their parent to the new field 'Invalidated'. The field used until now,
    'ParentAnalysisRequest', will be used for partitioning
    """
    logger.info("Rebinding retracted/invalidated ARs")

    # Walk through the Analysis Requests that were generated because of an
    # invalidation, get the source AR and rebind the fields
    relationship = "AnalysisRequestChildAnalysisRequest"
    ref_catalog = api.get_tool(REFERENCE_CATALOG)
    retests = ref_catalog(relationship=relationship)
    total = len(retests)
    to_remove = list()
    num = 0
    for num, relation in enumerate(retests, start=1):
        relation = relation.getObject()
        if not relation:
            continue
        retest = relation.getTargetObject()
        invalidated = relation.getSourceObject()
        retest.setInvalidated(invalidated)
        # Set ParentAnalysisRequest field to None, cause we will use this
        # field for storing Primary-Partitions relationship.
        retest.setParentAnalysisRequest(None)
        # Remove the relationship!
        to_remove.append((relation.aq_parent, relation.id))
        if num % 100 == 0:
            logger.info("Rebinding invalidated ARs: {0}/{1}"
                        .format(num, total))

    # Remove relationships
    for relation_to_remove in to_remove:
        folder = relation_to_remove[0]
        rel_id = relation_to_remove[1]
        folder.manage_delObjects([rel_id])

    logger.info("Rebound {} invalidated ARs".format(num))
python
{ "resource": "" }
q21387
recatalog_analyses_due_date
train
def recatalog_analyses_due_date(portal):
    """Recatalog the index and metadata field 'getDueDate'
    """
    logger.info("Updating Analyses getDueDate")
    # No need to update those analyses that are verified or published. Only
    # those that are under work
    catalog = api.get_tool(CATALOG_ANALYSIS_LISTING)
    review_states = ["retracted", "sample_due", "attachment_due",
                     "sample_received", "to_be_verified"]
    query = dict(portal_type="Analysis", review_state=review_states)
    analyses = api.search(query, CATALOG_ANALYSIS_LISTING)
    total = len(analyses)
    num = 0
    for num, analysis in enumerate(analyses, start=1):
        analysis = api.get_object(analysis)
        catalog.catalog_object(analysis, idxs=['getDueDate'])
        if num % 100 == 0:
            logger.info("Updating Analysis getDueDate: {0}/{1}"
                        .format(num, total))
    logger.info("{} Analyses updated".format(num))
python
{ "resource": "" }
q21388
update_rejection_permissions
train
def update_rejection_permissions(portal):
    """Adds the permission 'Reject Analysis Request' and updates the
    permission mappings accordingly
    """
    updated = update_rejection_permissions_for(portal, "bika_ar_workflow",
                                               "Reject Analysis Request")
    if updated:
        brains = api.search(dict(portal_type="AnalysisRequest"),
                            CATALOG_ANALYSIS_REQUEST_LISTING)
        update_rolemappings_for(brains, "bika_ar_workflow")

    updated = update_rejection_permissions_for(portal, "bika_sample_workflow",
                                               "Reject Sample")
    if updated:
        brains = api.search(dict(portal_type="Sample"), "bika_catalog")
        update_rolemappings_for(brains, "bika_sample_workflow")
python
{ "resource": "" }
q21389
update_analaysisrequests_due_date
train
def update_analaysisrequests_due_date(portal):
    """Removes the metadata column getLate from the AR catalog and adds the
    column getDueDate"""
    logger.info("Updating getLate -> getDueDate metadata columns ...")
    catalog_objects = False
    catalog = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
    if "getLate" in catalog.schema():
        catalog.delColumn("getLate")

    if "getDueDate" in catalog.schema():
        logger.info("getDueDate column already in catalog [SKIP]")
    else:
        logger.info("Adding column 'getDueDate' to catalog '{}' ..."
                    .format(catalog.id))
        catalog.addColumn("getDueDate")
        catalog_objects = True

    if "getDueDate" in catalog.indexes():
        logger.info("getDueDate index already in catalog [SKIP]")
    else:
        logger.info("Adding index 'getDueDate' to catalog '{}'"
                    .format(catalog.id))
        catalog.addIndex("getDueDate", "DateIndex")
        if not catalog_objects:
            catalog.manage_reindexIndex("getDueDate")

    if catalog_objects:
        # Only recatalog the objects if the column getDueDate was not there
        num = 0
        query = dict(portal_type="AnalysisRequest")
        ar_brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
        total = len(ar_brains)
        for num, analysis_request in enumerate(ar_brains, start=1):
            analysis_request = api.get_object(analysis_request)
            analysis_request.reindexObject(idxs=['getDueDate'])
            if num % 100 == 0:
                logger.info("Updating Analysis Request getDueDate: {0}/{1}"
                            .format(num, total))
        logger.info("{} Analysis Requests updated".format(num))

    logger.info("Updating getLate -> getDueDate metadata columns [DONE]")
python
{ "resource": "" }
q21390
PartitionMagicView.push_primary_analyses_for_removal
train
def push_primary_analyses_for_removal(self, analysis_request, analyses):
    """Stores the analyses to be removed after partitions creation
    """
    to_remove = self.analyses_to_remove.get(analysis_request, [])
    to_remove.extend(analyses)
    self.analyses_to_remove[analysis_request] = list(set(to_remove))
python
{ "resource": "" }
q21391
PartitionMagicView.remove_primary_analyses
train
def remove_primary_analyses(self):
    """Remove analyses relocated to partitions
    """
    for ar, analyses in self.analyses_to_remove.items():
        analyses_ids = list(set(map(api.get_id, analyses)))
        ar.manage_delObjects(analyses_ids)
    self.analyses_to_remove = dict()
python
{ "resource": "" }
q21392
PartitionMagicView.get_ar_data
train
def get_ar_data(self):
    """Returns a list of AR data
    """
    for obj in self.get_objects():
        info = self.get_base_info(obj)
        info.update({
            "analyses": self.get_analysis_data_for(obj),
            "sampletype": self.get_base_info(obj.getSampleType()),
            "number_of_partitions": self.get_number_of_partitions_for(obj),
            "template": self.get_template_data_for(obj),
        })
        yield info
python
{ "resource": "" }
q21393
PartitionMagicView.get_sampletype_data
train
def get_sampletype_data(self):
    """Returns a list of SampleType data
    """
    for obj in self.get_sampletypes():
        info = self.get_base_info(obj)
        yield info
python
{ "resource": "" }
q21394
PartitionMagicView.get_container_data
train
def get_container_data(self):
    """Returns a list of Container data
    """
    for obj in self.get_containers():
        info = self.get_base_info(obj)
        yield info
python
{ "resource": "" }
q21395
PartitionMagicView.get_preservation_data
train
def get_preservation_data(self):
    """Returns a list of Preservation data
    """
    for obj in self.get_preservations():
        info = self.get_base_info(obj)
        yield info
python
{ "resource": "" }
q21396
PartitionMagicView.get_sampletypes
train
def get_sampletypes(self):
    """Returns the available SampleTypes of the system
    """
    query = {
        "portal_type": "SampleType",
        "sort_on": "sortable_title",
        "sort_order": "ascending",
        "is_active": True,
    }
    results = api.search(query, "bika_setup_catalog")
    return map(api.get_object, results)
python
{ "resource": "" }
q21397
PartitionMagicView.get_containers
train
def get_containers(self):
    """Returns the available Containers of the system
    """
    query = dict(portal_type="Container",
                 sort_on="sortable_title",
                 sort_order="ascending",
                 is_active=True)
    results = api.search(query, "bika_setup_catalog")
    return map(api.get_object, results)
python
{ "resource": "" }
q21398
PartitionMagicView.get_analysis_data_for
train
def get_analysis_data_for(self, ar):
    """Return the Analysis data for this AR
    """
    # Exclude analyses from children (partitions)
    analyses = ar.objectValues("Analysis")
    out = []
    for an in analyses:
        info = self.get_base_info(an)
        info.update({
            "service_uid": an.getServiceUID(),
        })
        out.append(info)
    return out
python
{ "resource": "" }
q21399
PartitionMagicView.get_template_data_for
train
def get_template_data_for(self, ar):
    """Return the Template data for this AR
    """
    info = None
    template = ar.getTemplate()
    ar_sampletype_uid = api.get_uid(ar.getSampleType())
    ar_container_uid = ""
    if ar.getContainer():
        ar_container_uid = api.get_uid(ar.getContainer())
    ar_preservation_uid = ""
    if ar.getPreservation():
        ar_preservation_uid = api.get_uid(ar.getPreservation())

    if template:
        info = self.get_base_info(template)

        analyses = template.getAnalyses()
        partition_analyses = map(
            lambda x: (x.get("partition"), x.get("service_uid")), analyses)

        analyses_by_partition = defaultdict(list)
        for partition, service_uid in partition_analyses:
            analyses_by_partition[partition].append(service_uid)

        sampletypes_by_partition = defaultdict(list)
        containers_by_partition = defaultdict(list)
        preservations_by_partition = defaultdict(list)
        for part in template.getPartitions():
            part_id = part.get("part_id")
            sampletype_uid = part.get('sampletype_uid', ar_sampletype_uid)
            sampletypes_by_partition[part_id] = sampletype_uid
            container_uid = part.get("container_uid", ar_container_uid)
            containers_by_partition[part_id] = container_uid
            preserv_uid = part.get("preservation_uid", ar_preservation_uid)
            preservations_by_partition[part_id] = preserv_uid

        partitions = map(lambda p: p.get("part_id"),
                         template.getPartitions())
        info.update({
            "analyses": analyses_by_partition,
            "partitions": partitions,
            "sample_types": sampletypes_by_partition,
            "containers": containers_by_partition,
            "preservations": preservations_by_partition,
        })
    else:
        info = {
            "analyses": {},
            "partitions": [],
            "sample_types": {},
            "containers": {},
            "preservations": {},
        }
    return info
python
{ "resource": "" }