Dataset columns:
_id: string (length 2 to 7)
title: string (length 1 to 88)
partition: string (3 classes)
text: string (length 75 to 19.8k)
language: string (1 class)
meta_information: dict
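Each row that follows uses the schema above. As a minimal sketch (plain Python, values taken from the first record below; partition class names other than "train" are assumed), a single row can be handled like this:

# One record from this section as a plain dict; "text" holds the full
# source code of the function (shortened here for brevity).
record = {
    "_id": "q21400",
    "title": "PartitionMagicView.get_number_of_partitions_for",
    "partition": "train",
    "text": "def get_number_of_partitions_for(self, ar): ...",
    "language": "python",
    "meta_information": {"resource": ""},
}

# Rows can be filtered by partition or grouped by the class name encoded
# in the title prefix.
class_name = record["title"].split(".")[0]   # "PartitionMagicView"
is_training_row = record["partition"] == "train"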
q21400
PartitionMagicView.get_number_of_partitions_for
train
def get_number_of_partitions_for(self, ar):
    """Return the number of selected partitions
    """
    # fetch the number of partitions from the request
    uid = api.get_uid(ar)
    num = self.request.get("primary", {}).get(uid)
    if num is None:
        # get the number of partitions from the template
        template = ar.getTemplate()
        if template:
            num = len(template.getPartitions())
        else:
            num = DEFAULT_NUMBER_OF_PARTITIONS
    try:
        num = int(num)
    except (TypeError, ValueError):
        num = DEFAULT_NUMBER_OF_PARTITIONS
    return num
python
{ "resource": "" }
q21401
PartitionMagicView.get_base_info
train
def get_base_info(self, obj):
    """Extract the base info from the given object
    """
    obj = api.get_object(obj)
    review_state = api.get_workflow_status_of(obj)
    state_title = review_state.capitalize().replace("_", " ")
    return {
        "obj": obj,
        "id": api.get_id(obj),
        "uid": api.get_uid(obj),
        "title": api.get_title(obj),
        "path": api.get_path(obj),
        "url": api.get_url(obj),
        "review_state": review_state,
        "state_title": state_title,
    }
python
{ "resource": "" }
q21402
AccreditationView.selected_cats
train
def selected_cats(self, items):
    """Return a list of all categories with accredited services
    """
    cats = []
    for item in items:
        if 'category' in item and item['category'] not in cats:
            cats.append(item['category'])
    return cats
python
{ "resource": "" }
q21403
AnalysisRequestPublishedResults.contentsMethod
train
def contentsMethod(self, contentFilter):
    """ARReport objects associated to the current Analysis Request.
    If the user is not a Manager, LabManager, LabClerk or Client,
    no items are displayed.
    """
    allowedroles = ['Manager', 'LabManager', 'Client', 'LabClerk']
    pm = getToolByName(self.context, "portal_membership")
    member = pm.getAuthenticatedMember()
    roles = member.getRoles()
    allowed = [a for a in allowedroles if a in roles]
    return self.context.objectValues('ARReport') if allowed else []
python
{ "resource": "" }
q21404
Report.add_filter_by_client
train
def add_filter_by_client(self, query, out_params):
    """Applies the filter by client to the search query
    """
    current_client = logged_in_client(self.context)
    if current_client:
        query['getClientUID'] = api.get_uid(current_client)
    elif self.request.form.get("ClientUID", ""):
        query['getClientUID'] = self.request.form['ClientUID']
        client = api.get_object_by_uid(query['getClientUID'])
        out_params.append({'title': _('Client'),
                           'value': client.Title(),
                           'type': 'text'})
python
{ "resource": "" }
q21405
Report.add_filter_by_date
train
def add_filter_by_date(self, query, out_params):
    """Applies the filter by Requested date to the search query
    """
    date_query = formatDateQuery(self.context, 'Requested')
    if date_query:
        query['created'] = date_query
        requested = formatDateParms(self.context, 'Requested')
        out_params.append({'title': _('Requested'),
                           'value': requested,
                           'type': 'text'})
python
{ "resource": "" }
q21406
Report.add_filter_by_review_state
train
def add_filter_by_review_state(self, query, out_params):
    """Applies the filter by review_state to the search query
    """
    self.add_filter_by_wf_state(query=query, out_params=out_params,
                                wf_id="bika_analysis_workflow",
                                index="review_state",
                                title=_("Status"))
python
{ "resource": "" }
q21407
Import
train
def Import(context, request):
    """Roche Cobas Taqman 48 analysis results
    """
    infile = request.form['rochecobas_taqman_model48_file']
    fileformat = request.form['rochecobas_taqman_model48_format']
    artoapply = request.form['rochecobas_taqman_model48_artoapply']
    override = request.form['rochecobas_taqman_model48_override']
    instrument = request.form.get('instrument', None)
    errors = []
    logs = []
    warns = []

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'rsf':
        parser = RocheCobasTaqmanParser(infile)
    elif fileformat == 'csv':
        parser = RocheCobasTaqmanParser(infile, "csv")
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))

    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']

        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]

        importer = RocheCobasTaqman48Importer(parser=parser,
                                              context=context,
                                              allowed_ar_states=status,
                                              allowed_analysis_states=None,
                                              override=over,
                                              instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}

    return json.dumps(results)
python
{ "resource": "" }
q21408
Attachment.getClientUID
train
def getClientUID(self):
    """Return the UID of the client
    """
    client = api.get_parent(self)
    if not client:
        return ""
    return api.get_uid(client)
python
{ "resource": "" }
q21409
Attachment.getLinkedRequests
train
def getLinkedRequests(self):
    """Lookup linked Analysis Requests

    :returns: sorted list of ARs, where the latest AR comes first
    """
    rc = api.get_tool("reference_catalog")
    refs = rc.getBackReferences(self, "AnalysisRequestAttachment")
    # fetch the objects by UID and handle nonexisting UIDs gracefully
    ars = map(lambda ref: api.get_object_by_uid(ref.sourceUID, None), refs)
    # filter out None values (nonexisting UIDs)
    ars = filter(None, ars)
    # sort by physical path, so that attachments coming from an AR with a
    # higher "-Rn" suffix get sorted correctly.
    # N.B. the created date is the same, hence we can not use it
    return sorted(ars, key=api.get_path, reverse=True)
python
{ "resource": "" }
q21410
Attachment.getLinkedAnalyses
train
def getLinkedAnalyses(self):
    """Lookup linked Analyses

    :returns: sorted list of ANs, where the latest AN comes first
    """
    # Fetch the linked Analyses UIDs
    refs = get_backreferences(self, "AnalysisAttachment")
    # fetch the objects by UID and handle nonexisting UIDs gracefully
    ans = map(lambda uid: api.get_object_by_uid(uid, None), refs)
    # filter out None values (nonexisting UIDs)
    ans = filter(None, ans)
    # sort by physical path, so that attachments coming from an AR with a
    # higher "-Rn" suffix get sorted correctly.
    # N.B. the created date is the same, hence we can not use it
    return sorted(ans, key=api.get_path, reverse=True)
python
{ "resource": "" }
q21411
Attachment.getTextTitle
train
def getTextTitle(self):
    """Return a title for texts and listings
    """
    request_id = self.getRequestID()
    if not request_id:
        return ""
    analysis = self.getAnalysis()
    if not analysis:
        return request_id
    return "%s - %s" % (request_id, analysis.Title())
python
{ "resource": "" }
q21412
Attachment.getRequest
train
def getRequest(self):
    """Return the primary AR this attachment is linked to
    """
    ars = self.getLinkedRequests()
    if len(ars) > 1:
        # Attachment is assigned to more than one Analysis Request.
        # This might happen when the AR was invalidated
        ar_ids = ", ".join(map(api.get_id, ars))
        logger.info("Attachment assigned to more than one AR: [{}]. "
                    "The first AR will be returned".format(ar_ids))
    # return the first AR
    if len(ars) >= 1:
        return ars[0]
    # Check if the attachment is linked to an analysis and try to get the
    # AR from the linked analysis
    analysis = self.getAnalysis()
    if IRequestAnalysis.providedBy(analysis):
        return analysis.getRequest()
    return None
python
{ "resource": "" }
q21413
Attachment.getAnalysis
train
def getAnalysis(self):
    """Return the primary analysis this attachment is linked to
    """
    analysis = None
    ans = self.getLinkedAnalyses()
    if len(ans) > 1:
        # Attachment is assigned to more than one Analysis. This might
        # happen when the AR was invalidated
        an_ids = ", ".join(map(api.get_id, ans))
        logger.info("Attachment assigned to more than one Analysis: [{}]. "
                    "The first Analysis will be returned".format(an_ids))
    if len(ans) >= 1:
        analysis = ans[0]
    return analysis
python
{ "resource": "" }
q21414
Method.getInstrumentsDisplayList
train
def getInstrumentsDisplayList(self):
    """Instruments capable of performing this method
    """
    items = [(i.UID(), i.Title()) for i in self.getInstruments()]
    return DisplayList(list(items))
python
{ "resource": "" }
q21415
FrontPageView.get_user_roles
train
def get_user_roles(self):
    """Returns a list of roles for the current user
    """
    if self.is_anonymous_user():
        return []
    current_user = ploneapi.user.get_current()
    return ploneapi.user.get_roles(user=current_user)
python
{ "resource": "" }
q21416
FrontPageView.set_versions
train
def set_versions(self):
    """Configure a list of product versions from portal.quickinstaller
    """
    self.versions = {}
    self.upgrades = {}
    qi = getToolByName(self.context, "portal_quickinstaller")
    for key in qi.keys():
        self.versions[key] = qi.getProductVersion(key)
        info = qi.upgradeInfo(key)
        if info and 'installedVersion' in info:
            self.upgrades[key] = info['installedVersion']
python
{ "resource": "" }
q21417
SamplingRound.getAnalysisRequests
train
def getAnalysisRequests(self):
    """Return all the Analysis Request brains linked to the Sampling Round
    """
    # Get the catalog from the portal object, because it cannot be acquired
    # through 'self' here
    pc = getToolByName(api.portal.get(), 'portal_catalog')
    contentFilter = {'portal_type': 'AnalysisRequest',
                     'is_active': True,
                     'SamplingRoundUID': self.UID()}
    return pc(contentFilter)
python
{ "resource": "" }
q21418
SamplingRound.getClientContact
train
def getClientContact(self):
    """Returns info from the Client contact who coordinates with the lab
    """
    pc = getToolByName(api.portal.get(), 'portal_catalog')
    contentFilter = {'portal_type': 'Contact', 'id': self.client_contact}
    cnt = pc(contentFilter)
    cntdict = {'uid': '', 'id': '', 'fullname': '', 'url': ''}
    if len(cnt) == 1:
        cnt = cnt[0].getObject()
        cntdict = {
            'uid': cnt.UID(),
            'id': cnt.id,
            'fullname': cnt.getFullname(),
            'url': cnt.absolute_url(),
        }
    else:
        from bika.lims import logger
        error = "Error when looking for contact with id '%s'. "
        logger.exception(error, self.client_contact)
    return cntdict
python
{ "resource": "" }
q21419
SamplingRound.workflow_script_cancel
train
def workflow_script_cancel(self):
    """When the round is cancelled, all its associated Samples and ARs are
    cancelled by the system.
    """
    if skip(self, "cancel"):
        return
    self.reindexObject(idxs=["is_active", ])
    # deactivate all analysis requests in this sampling round.
    analysis_requests = self.getAnalysisRequests()
    for ar in analysis_requests:
        ar_obj = ar.getObject()
        workflow = getToolByName(self, 'portal_workflow')
        if workflow.getInfoFor(ar_obj, 'review_state') != 'cancelled':
            doActionFor(ar.getObject(), 'cancel')
python
{ "resource": "" }
q21420
AnalysisRequestAddView.get_view_url
train
def get_view_url(self):
    """Return the current view url including request parameters
    """
    request = self.request
    url = request.getURL()
    qs = request.getHeader("query_string")
    if not qs:
        return url
    return "{}?{}".format(url, qs)
python
{ "resource": "" }
q21421
AnalysisRequestAddView.get_currency
train
def get_currency(self):
    """Returns the configured currency
    """
    bika_setup = api.get_bika_setup()
    currency = bika_setup.getCurrency()
    currencies = locales.getLocale('en').numbers.currencies
    return currencies[currency]
python
{ "resource": "" }
q21422
AnalysisRequestAddView.get_ar_count
train
def get_ar_count(self):
    """Return the ar_count request parameter
    """
    ar_count = 1
    try:
        ar_count = int(self.request.form.get("ar_count", 1))
    except (TypeError, ValueError):
        ar_count = 1
    return ar_count
python
{ "resource": "" }
q21423
AnalysisRequestAddView.get_ar
train
def get_ar(self):
    """Create a temporary AR to fetch the fields from
    """
    if not self.tmp_ar:
        logger.info("*** CREATING TEMPORARY AR ***")
        self.tmp_ar = self.context.restrictedTraverse(
            "portal_factory/AnalysisRequest/Request new analyses")
    return self.tmp_ar
python
{ "resource": "" }
q21424
AnalysisRequestAddView.generate_specifications
train
def generate_specifications(self, count=1):
    """Returns a mapping of count -> specification
    """
    out = {}
    # mapping of UID index to AR objects {1: <AR1>, 2: <AR2> ...}
    copy_from = self.get_copy_from()
    for arnum in range(count):
        # get the source object
        source = copy_from.get(arnum)
        if source is None:
            out[arnum] = {}
            continue
        # get the results range from the source object
        results_range = source.getResultsRange()
        # mapping of keyword -> rr specification
        specification = {}
        for rr in results_range:
            specification[rr.get("keyword")] = rr
        out[arnum] = specification
    return out
python
{ "resource": "" }
q21425
AnalysisRequestAddView.get_copy_from
train
def get_copy_from(self):
    """Returns a mapping of UID index -> AR object
    """
    # Create a mapping of source ARs for copy
    copy_from = self.request.form.get("copy_from", "").split(",")
    # clean out empty strings
    copy_from_uids = filter(lambda x: x, copy_from)
    out = dict().fromkeys(range(len(copy_from_uids)))
    for n, uid in enumerate(copy_from_uids):
        ar = self.get_object_by_uid(uid)
        if ar is None:
            continue
        out[n] = ar
    logger.info("get_copy_from: uids={}".format(copy_from_uids))
    return out
python
{ "resource": "" }
q21426
AnalysisRequestAddView.get_default_value
train
def get_default_value(self, field, context, arnum):
    """Get the default value of the field
    """
    name = field.getName()
    default = field.getDefault(context)
    if name == "Batch":
        batch = self.get_batch()
        if batch is not None:
            default = batch
    if name == "Client":
        client = self.get_client()
        if client is not None:
            default = client
    # only set default contact for first column
    if name == "Contact" and arnum == 0:
        contact = self.get_default_contact()
        if contact is not None:
            default = contact
    if name == "Sample":
        sample = self.get_sample()
        if sample is not None:
            default = sample
    # Querying for adapters to get default values from add-ons:
    # We don't know which fields the form will render since some of them
    # may come from add-ons. In order to obtain the default value for those
    # fields we take advantage of adapters. Adapters registration should
    # have the following format:
    # < adapter
    #   factory = ...
    #   for = "*"
    #   provides = "bika.lims.interfaces.IGetDefaultFieldValueARAddHook"
    #   name = "<fieldName>_default_value_hook"
    # / >
    hook_name = name + '_default_value_hook'
    adapter = queryAdapter(
        self.request,
        name=hook_name,
        interface=IGetDefaultFieldValueARAddHook)
    if adapter is not None:
        default = adapter(self.context)
    logger.info("get_default_value: context={} field={} value={} arnum={}"
                .format(context, name, default, arnum))
    return default
python
{ "resource": "" }
q21427
AnalysisRequestAddView.get_field_value
train
def get_field_value(self, field, context):
    """Get the stored value of the field
    """
    name = field.getName()
    value = context.getField(name).get(context)
    logger.info("get_field_value: context={} field={} value={}".format(
        context, name, value))
    return value
python
{ "resource": "" }
q21428
AnalysisRequestAddView.get_client
train
def get_client(self):
    """Returns the Client
    """
    context = self.context
    parent = api.get_parent(context)
    if context.portal_type == "Client":
        return context
    elif parent.portal_type == "Client":
        return parent
    elif context.portal_type == "Batch":
        return context.getClient()
    elif parent.portal_type == "Batch":
        return parent.getClient()
    return None
python
{ "resource": "" }
q21429
AnalysisRequestAddView.get_batch
train
def get_batch(self):
    """Returns the Batch
    """
    context = self.context
    parent = api.get_parent(context)
    if context.portal_type == "Batch":
        return context
    elif parent.portal_type == "Batch":
        return parent
    return None
python
{ "resource": "" }
q21430
AnalysisRequestAddView.get_parent_ar
train
def get_parent_ar(self, ar):
    """Returns the parent AR
    """
    parent = ar.getParentAnalysisRequest()
    # Return immediately if we have no parent
    if parent is None:
        return None
    # Walk back the chain until we reach the source AR
    while True:
        pparent = parent.getParentAnalysisRequest()
        if pparent is None:
            break
        # remember the new parent
        parent = pparent
    return parent
python
{ "resource": "" }
q21431
AnalysisRequestAddView.is_field_visible
train
def is_field_visible(self, field):
    """Check if the field is visible
    """
    context = self.context
    fieldname = field.getName()
    # hide the Client field on client contexts
    if fieldname == "Client" and context.portal_type in ("Client", ):
        return False
    # hide the Batch field on batch contexts
    if fieldname == "Batch" and context.portal_type in ("Batch", ):
        return False
    return True
python
{ "resource": "" }
q21432
AnalysisRequestAddView.get_fields_with_visibility
train
def get_fields_with_visibility(self, visibility, mode="add"):
    """Return the AR fields with the current visibility
    """
    ar = self.get_ar()
    mv = api.get_view("ar_add_manage", context=ar)
    mv.get_field_order()
    out = []
    for field in mv.get_fields_with_visibility(visibility, mode):
        # check custom field condition
        visible = self.is_field_visible(field)
        if visible is False and visibility != "hidden":
            continue
        out.append(field)
    return out
python
{ "resource": "" }
q21433
AnalysisRequestAddView.get_service_categories
train
def get_service_categories(self, restricted=True):
    """Return all service categories in the right order

    :param restricted: Client settings restrict categories
    :type restricted: bool
    :returns: Category catalog results
    :rtype: brains
    """
    bsc = api.get_tool("bika_setup_catalog")
    query = {
        "portal_type": "AnalysisCategory",
        "is_active": True,
        "sort_on": "sortable_title",
    }
    categories = bsc(query)
    client = self.get_client()
    if client and restricted:
        restricted_categories = client.getRestrictedCategories()
        restricted_category_ids = map(
            lambda c: c.getId(), restricted_categories)
        # keep correct order of categories
        if restricted_category_ids:
            categories = filter(
                lambda c: c.getId in restricted_category_ids, categories)
    return categories
python
{ "resource": "" }
q21434
AnalysisRequestAddView.get_services
train
def get_services(self, poc="lab"):
    """Return all Services

    :param poc: Point of capture (lab/field)
    :type poc: string
    :returns: Mapping of category -> list of services
    :rtype: dict
    """
    bsc = api.get_tool("bika_setup_catalog")
    query = {
        "portal_type": "AnalysisService",
        "getPointOfCapture": poc,
        "is_active": True,
        "sort_on": "sortable_title",
    }
    services = bsc(query)
    categories = self.get_service_categories(restricted=False)
    analyses = {key: [] for key in map(lambda c: c.Title, categories)}
    # append the empty category as well
    analyses[""] = []
    for brain in services:
        category = brain.getCategoryTitle
        if category in analyses:
            analyses[category].append(brain)
    return analyses
python
{ "resource": "" }
q21435
AnalysisRequestAddView.get_service_uid_from
train
def get_service_uid_from(self, analysis):
    """Return the service UID from the analysis
    """
    analysis = api.get_object(analysis)
    return api.get_uid(analysis.getAnalysisService())
python
{ "resource": "" }
q21436
AnalysisRequestAddView.is_service_selected
train
def is_service_selected(self, service):
    """Checks if the given service is selected by one of the ARs.

    This is used to make the whole line visible or not.
    """
    service_uid = api.get_uid(service)
    for arnum in range(self.ar_count):
        analyses = self.fieldvalues.get("Analyses-{}".format(arnum))
        if not analyses:
            continue
        service_uids = map(self.get_service_uid_from, analyses)
        if service_uid in service_uids:
            return True
    return False
python
{ "resource": "" }
q21437
AnalysisRequestManageView.get_sorted_fields
train
def get_sorted_fields(self):
    """Return the sorted fields
    """
    inf = float("inf")
    order = self.get_field_order()

    def field_cmp(field1, field2):
        _n1 = field1.getName()
        _n2 = field2.getName()
        _i1 = _n1 in order and order.index(_n1) + 1 or inf
        _i2 = _n2 in order and order.index(_n2) + 1 or inf
        return cmp(_i1, _i2)

    return sorted(self.get_fields(), cmp=field_cmp)
python
{ "resource": "" }
q21438
AnalysisRequestManageView.get_fields_with_visibility
train
def get_fields_with_visibility(self, visibility="edit", mode="add"): """Return the fields with visibility """ fields = self.get_sorted_fields() out = [] for field in fields: v = field.widget.isVisible( self.context, mode, default='invisible', field=field) if self.is_field_visible(field) is False: v = "hidden" visibility_guard = True # visibility_guard is a widget field defined in the schema in order # to know the visibility of the widget when the field is related to # a dynamically changing content such as workflows. For instance # those fields related to the workflow will be displayed only if # the workflow is enabled, otherwise they should not be shown. if 'visibility_guard' in dir(field.widget): visibility_guard = eval(field.widget.visibility_guard) if v == visibility and visibility_guard: out.append(field) return out
python
{ "resource": "" }
q21439
ajaxAnalysisRequestAddView.to_iso_date
train
def to_iso_date(self, dt):
    """Return the ISO representation of a date object
    """
    if dt is None:
        return ""
    if isinstance(dt, DateTime):
        return dt.ISO8601()
    if isinstance(dt, datetime):
        return dt.isoformat()
    raise TypeError("{} is neither an instance of DateTime nor datetime"
                    .format(repr(dt)))
python
{ "resource": "" }
q21440
ajaxAnalysisRequestAddView.get_records
train
def get_records(self):
    """Returns a list of AR records

    Fields coming from `request.form` have a number suffix, e.g. Contact-0.
    Fields with the same suffix number are grouped together in a record.
    Each record represents the data for one column in the AR Add form and
    contains a mapping of the fieldName (w/o suffix) -> value.

    Example:
    [{"Contact": "Rita Mohale", ...}, {"Contact": "Neil Standard"} ...]
    """
    form = self.request.form
    ar_count = self.get_ar_count()

    records = []
    # Group belonging AR fields together
    for arnum in range(ar_count):
        record = {}
        s1 = "-{}".format(arnum)
        keys = filter(lambda key: s1 in key, form.keys())
        for key in keys:
            new_key = key.replace(s1, "")
            value = form.get(key)
            record[new_key] = value
        records.append(record)
    return records
python
{ "resource": "" }
q21441
ajaxAnalysisRequestAddView.get_uids_from_record
train
def get_uids_from_record(self, record, key):
    """Returns a list of parsed UIDs from a single form field identified by
    the given key.

    A form field ending with `_uid` can contain an empty value, a single
    UID or multiple UIDs separated by a comma.

    This method parses the UID value and returns a list of non-empty UIDs.
    """
    value = record.get(key, None)
    if value is None:
        return []
    if isinstance(value, basestring):
        value = value.split(",")
    return filter(lambda uid: uid, value)
python
{ "resource": "" }
q21442
ajaxAnalysisRequestAddView.get_objs_from_record
train
def get_objs_from_record(self, record, key):
    """Returns a mapping of UID -> object
    """
    uids = self.get_uids_from_record(record, key)
    objs = map(self.get_object_by_uid, uids)
    return dict(zip(uids, objs))
python
{ "resource": "" }
q21443
ajaxAnalysisRequestAddView.get_base_info
train
def get_base_info(self, obj):
    """Returns the base info of an object
    """
    if obj is None:
        return {}
    info = {
        "id": obj.getId(),
        "uid": obj.UID(),
        "title": obj.Title(),
        "description": obj.Description(),
        "url": obj.absolute_url(),
    }
    return info
python
{ "resource": "" }
q21444
ajaxAnalysisRequestAddView.get_service_info
train
def get_service_info(self, obj):
    """Returns the info for a Service
    """
    info = self.get_base_info(obj)
    info.update({
        "short_title": obj.getShortTitle(),
        "scientific_name": obj.getScientificName(),
        "unit": obj.getUnit(),
        "keyword": obj.getKeyword(),
        "methods": map(self.get_method_info, obj.getMethods()),
        "calculation": self.get_calculation_info(obj.getCalculation()),
        "price": obj.getPrice(),
        "currency_symbol": self.get_currency().symbol,
        "accredited": obj.getAccredited(),
        "category": obj.getCategoryTitle(),
        "poc": obj.getPointOfCapture(),
    })
    dependencies = get_calculation_dependencies_for(obj).values()
    info["dependencies"] = map(self.get_base_info, dependencies)
    return info
python
{ "resource": "" }
q21445
ajaxAnalysisRequestAddView.get_template_info
train
def get_template_info(self, obj):
    """Returns the info for a Template
    """
    client = self.get_client()
    client_uid = api.get_uid(client) if client else ""

    profile = obj.getAnalysisProfile()
    profile_uid = api.get_uid(profile) if profile else ""
    profile_title = profile.Title() if profile else ""

    sample_type = obj.getSampleType()
    sample_type_uid = api.get_uid(sample_type) if sample_type else ""
    sample_type_title = sample_type.Title() if sample_type else ""

    sample_point = obj.getSamplePoint()
    sample_point_uid = api.get_uid(sample_point) if sample_point else ""
    sample_point_title = sample_point.Title() if sample_point else ""

    service_uids = []
    analyses_partitions = {}
    analyses = obj.getAnalyses()

    for record in analyses:
        service_uid = record.get("service_uid")
        service_uids.append(service_uid)
        analyses_partitions[service_uid] = record.get("partition")

    info = self.get_base_info(obj)
    info.update({
        "analyses_partitions": analyses_partitions,
        "analysis_profile_title": profile_title,
        "analysis_profile_uid": profile_uid,
        "client_uid": client_uid,
        "composite": obj.getComposite(),
        "partitions": obj.getPartitions(),
        "remarks": obj.getRemarks(),
        "sample_point_title": sample_point_title,
        "sample_point_uid": sample_point_uid,
        "sample_type_title": sample_type_title,
        "sample_type_uid": sample_type_uid,
        "service_uids": service_uids,
    })
    return info
python
{ "resource": "" }
q21446
ajaxAnalysisRequestAddView.get_profile_info
train
def get_profile_info(self, obj):
    """Returns the info for a Profile
    """
    info = self.get_base_info(obj)
    info.update({})
    return info
python
{ "resource": "" }
q21447
ajaxAnalysisRequestAddView.get_method_info
train
def get_method_info(self, obj):
    """Returns the info for a Method
    """
    info = self.get_base_info(obj)
    info.update({})
    return info
python
{ "resource": "" }
q21448
ajaxAnalysisRequestAddView.get_calculation_info
train
def get_calculation_info(self, obj):
    """Returns the info for a Calculation
    """
    info = self.get_base_info(obj)
    info.update({})
    return info
python
{ "resource": "" }
q21449
ajaxAnalysisRequestAddView.get_sampletype_info
train
def get_sampletype_info(self, obj): """Returns the info for a Sample Type """ info = self.get_base_info(obj) # Bika Setup folder bika_setup = api.get_bika_setup() # bika samplepoints bika_samplepoints = bika_setup.bika_samplepoints bika_samplepoints_uid = api.get_uid(bika_samplepoints) # bika analysisspecs bika_analysisspecs = bika_setup.bika_analysisspecs bika_analysisspecs_uid = api.get_uid(bika_analysisspecs) # client client = self.get_client() client_uid = client and api.get_uid(client) or "" # sample matrix sample_matrix = obj.getSampleMatrix() sample_matrix_uid = sample_matrix and sample_matrix.UID() or "" sample_matrix_title = sample_matrix and sample_matrix.Title() or "" # container type container_type = obj.getContainerType() container_type_uid = container_type and container_type.UID() or "" container_type_title = container_type and container_type.Title() or "" # sample points sample_points = obj.getSamplePoints() sample_point_uids = map(lambda sp: sp.UID(), sample_points) sample_point_titles = map(lambda sp: sp.Title(), sample_points) info.update({ "prefix": obj.getPrefix(), "minimum_volume": obj.getMinimumVolume(), "hazardous": obj.getHazardous(), "retention_period": obj.getRetentionPeriod(), "sample_matrix_uid": sample_matrix_uid, "sample_matrix_title": sample_matrix_title, "container_type_uid": container_type_uid, "container_type_title": container_type_title, "sample_point_uids": sample_point_uids, "sample_point_titles": sample_point_titles, }) # catalog queries for UI field filtering filter_queries = { "samplepoint": { "getSampleTypeTitles": [obj.Title(), ''], "getClientUID": [client_uid, bika_samplepoints_uid], "sort_order": "descending", }, "specification": { "getSampleTypeTitle": obj.Title(), "getClientUID": [client_uid, bika_analysisspecs_uid], "sort_order": "descending", } } info["filter_queries"] = filter_queries return info
python
{ "resource": "" }
q21450
ajaxAnalysisRequestAddView.get_sample_info
train
def get_sample_info(self, obj): """Returns the info for a Sample """ info = self.get_base_info(obj) # sample type sample_type = obj.getSampleType() sample_type_uid = sample_type and sample_type.UID() or "" sample_type_title = sample_type and sample_type.Title() or "" # sample condition sample_condition = obj.getSampleCondition() sample_condition_uid = sample_condition \ and sample_condition.UID() or "" sample_condition_title = sample_condition \ and sample_condition.Title() or "" # storage location storage_location = obj.getStorageLocation() storage_location_uid = storage_location \ and storage_location.UID() or "" storage_location_title = storage_location \ and storage_location.Title() or "" # sample point sample_point = obj.getSamplePoint() sample_point_uid = sample_point and sample_point.UID() or "" sample_point_title = sample_point and sample_point.Title() or "" # container type container_type = sample_type and sample_type.getContainerType() or None container_type_uid = container_type and container_type.UID() or "" container_type_title = container_type and container_type.Title() or "" # Sampling deviation deviation = obj.getSamplingDeviation() deviation_uid = deviation and deviation.UID() or "" deviation_title = deviation and deviation.Title() or "" info.update({ "sample_id": obj.getId(), "batch_uid": obj.getBatchUID() or None, "date_sampled": self.to_iso_date(obj.getDateSampled()), "sampling_date": self.to_iso_date(obj.getSamplingDate()), "sample_type_uid": sample_type_uid, "sample_type_title": sample_type_title, "container_type_uid": container_type_uid, "container_type_title": container_type_title, "sample_condition_uid": sample_condition_uid, "sample_condition_title": sample_condition_title, "storage_location_uid": storage_location_uid, "storage_location_title": storage_location_title, "sample_point_uid": sample_point_uid, "sample_point_title": sample_point_title, "environmental_conditions": obj.getEnvironmentalConditions(), "composite": obj.getComposite(), "client_uid": obj.getClientUID(), "client_title": obj.getClientTitle(), "contact": self.get_contact_info(obj.getContact()), "client_order_number": obj.getClientOrderNumber(), "client_sample_id": obj.getClientSampleID(), "client_reference": obj.getClientReference(), "sampling_deviation_uid": deviation_uid, "sampling_deviation_title": deviation_title, "sampling_workflow_enabled": obj.getSamplingWorkflowEnabled(), "remarks": obj.getRemarks(), }) return info
python
{ "resource": "" }
q21451
ajaxAnalysisRequestAddView.get_specification_info
train
def get_specification_info(self, obj): """Returns the info for a Specification """ info = self.get_base_info(obj) results_range = obj.getResultsRange() info.update({ "results_range": results_range, "sample_type_uid": obj.getSampleTypeUID(), "sample_type_title": obj.getSampleTypeTitle(), "client_uid": obj.getClientUID(), }) bsc = api.get_tool("bika_setup_catalog") def get_service_by_keyword(keyword): if keyword is None: return [] return map(api.get_object, bsc({ "portal_type": "AnalysisService", "getKeyword": keyword })) # append a mapping of service_uid -> specification specifications = {} for spec in results_range: service_uid = spec.get("uid") if service_uid is None: # service spec is not attached to a specific service, but to a # keyword for service in get_service_by_keyword(spec.get("keyword")): service_uid = api.get_uid(service) specifications[service_uid] = spec continue specifications[service_uid] = spec info["specifications"] = specifications # spec'd service UIDs info["service_uids"] = specifications.keys() return info
python
{ "resource": "" }
q21452
ajaxAnalysisRequestAddView.get_container_info
train
def get_container_info(self, obj):
    """Returns the info for a Container
    """
    info = self.get_base_info(obj)
    info.update({})
    return info
python
{ "resource": "" }
q21453
ajaxAnalysisRequestAddView.get_preservation_info
train
def get_preservation_info(self, obj):
    """Returns the info for a Preservation
    """
    info = self.get_base_info(obj)
    info.update({})
    return info
python
{ "resource": "" }
q21454
ajaxAnalysisRequestAddView.ajax_get_service
train
def ajax_get_service(self):
    """Returns the services information
    """
    uid = self.request.form.get("uid", None)
    if uid is None:
        return self.error("Invalid UID", status=400)
    service = self.get_object_by_uid(uid)
    if not service:
        return self.error("Service not found", status=404)
    info = self.get_service_info(service)
    return info
python
{ "resource": "" }
q21455
ajaxAnalysisRequestAddView.ajax_recalculate_prices
train
def ajax_recalculate_prices(self): """Recalculate prices for all ARs """ # When the option "Include and display pricing information" in # Bika Setup Accounting tab is not selected if not self.show_recalculate_prices(): return {} # The sorted records from the request records = self.get_records() client = self.get_client() bika_setup = api.get_bika_setup() member_discount = float(bika_setup.getMemberDiscount()) member_discount_applies = False if client: member_discount_applies = client.getMemberDiscountApplies() prices = {} for n, record in enumerate(records): ardiscount_amount = 0.00 arservices_price = 0.00 arprofiles_price = 0.00 arprofiles_vat_amount = 0.00 arservice_vat_amount = 0.00 services_from_priced_profile = [] profile_uids = record.get("Profiles_uid", "").split(",") profile_uids = filter(lambda x: x, profile_uids) profiles = map(self.get_object_by_uid, profile_uids) services = map(self.get_object_by_uid, record.get("Analyses", [])) # ANALYSIS PROFILES PRICE for profile in profiles: use_profile_price = profile.getUseAnalysisProfilePrice() if not use_profile_price: continue profile_price = float(profile.getAnalysisProfilePrice()) arprofiles_price += profile_price arprofiles_vat_amount += profile.getVATAmount() profile_services = profile.getService() services_from_priced_profile.extend(profile_services) # ANALYSIS SERVICES PRICE for service in services: if service in services_from_priced_profile: continue service_price = float(service.getPrice()) # service_vat = float(service.getVAT()) service_vat_amount = float(service.getVATAmount()) arservice_vat_amount += service_vat_amount arservices_price += service_price base_price = arservices_price + arprofiles_price # Calculate the member discount if it applies if member_discount and member_discount_applies: logger.info("Member discount applies with {}%".format( member_discount)) ardiscount_amount = base_price * member_discount / 100 subtotal = base_price - ardiscount_amount vat_amount = arprofiles_vat_amount + arservice_vat_amount total = subtotal + vat_amount prices[n] = { "discount": "{0:.2f}".format(ardiscount_amount), "subtotal": "{0:.2f}".format(subtotal), "vat": "{0:.2f}".format(vat_amount), "total": "{0:.2f}".format(total), } logger.info("Prices for AR {}: Discount={discount} " "VAT={vat} Subtotal={subtotal} total={total}" .format(n, **prices[n])) return prices
python
{ "resource": "" }
q21456
GeneXpertParser._handle_header
train
def _handle_header(self, sline):
    """A function for lines with only ONE COLUMN. If a line has only one
    column, then it is either a Section or Subsection name.
    """
    # All characters UPPER CASE means it is a Section.
    if sline[0].isupper():
        self._cur_section = sline[0]
        self._cur_sub_section = ''
        return 0
    else:
        self._cur_sub_section = sline[0]
        if sline[0] == SUBSECTION_ANALYTE_RESULT:
            # This is the Analyte Result line, next line will be the headers
            # of the results table
            self._is_header_line = True
            return 0
        elif sline[0] in ('Detail', 'Errors', 'Messages'):
            # It is the end of the Analyte Result sub-section. Add the last
            # record as a raw result and reset everything.
            self._submit_result()
            return 0
python
{ "resource": "" }
q21457
GeneXpertParser._submit_result
train
def _submit_result(self):
    """Adds the current values as a Raw Result and resets everything.

    Notice that we are not calculating the final result of the assay. We
    just set NP and GP values and, in Bika, the AS will have a Calculation
    to generate the final result based on NP and GP values.
    """
    if self._cur_res_id and self._cur_values:
        # Setting DefaultResult just because it is obligatory. However,
        # it won't be used because AS must have a Calculation based on
        # GP and NP results.
        self._cur_values[self._keyword]['DefaultResult'] = 'DefResult'
        self._cur_values[self._keyword]['DefResult'] = ''

        # If we add results as a raw result, AnalysisResultsImporter will
        # automatically import them to the system. The only important thing
        # here is to respect the dictionary format.
        self._addRawResult(self._cur_res_id, self._cur_values)
        self._reset()
python
{ "resource": "" }
q21458
GeneXpertParser._format_keyword
train
def _format_keyword(self, keyword):
    """Removes special characters from a keyword. Analysis Services must
    have this kind of keyword. E.g. if the assay name from the GeneXpert
    Instrument is 'Ebola RUO', an AS must be created on Bika with the
    keyword 'EbolaRUO'
    """
    import re
    result = ''
    if keyword:
        result = re.sub(r"\W", "", keyword)
        # Remove underscores ('_') too.
        result = re.sub(r"_", "", result)
    return result
python
{ "resource": "" }
q21459
ManageResultsView.get_instrument_title
train
def get_instrument_title(self):
    """Return the current instrument title
    """
    instrument = self.context.getInstrument()
    if not instrument:
        return ""
    return api.get_title(instrument)
python
{ "resource": "" }
q21460
ManageResultsView.is_assignment_allowed
train
def is_assignment_allowed(self):
    """Check if analyst assignment is allowed
    """
    if not self.is_manage_allowed():
        return False
    review_state = api.get_workflow_status_of(self.context)
    edit_states = ["open", "attachment_due", "to_be_verified"]
    return review_state in edit_states
python
{ "resource": "" }
q21461
ManageResultsView.get_wide_interims
train
def get_wide_interims(self): """Returns a dictionary with the analyses services from the current worksheet which have at least one interim with 'Wide' attribute set to true and that have not been yet submitted The structure of the returned dictionary is the following: <Analysis_keyword>: { 'analysis': <Analysis_name>, 'keyword': <Analysis_keyword>, 'interims': { <Interim_keyword>: { 'value': <Interim_default_value>, 'keyword': <Interim_key>, 'title': <Interim_title> } } } """ outdict = {} allowed_states = ['assigned', 'unassigned'] for analysis in self.context.getAnalyses(): # TODO Workflow - Analysis Use a query instead of this if api.get_workflow_status_of(analysis) not in allowed_states: continue if analysis.getKeyword() in outdict.keys(): continue calculation = analysis.getCalculation() if not calculation: continue andict = { "analysis": analysis.Title(), "keyword": analysis.getKeyword(), "interims": {} } # Analysis Service interim defaults for field in analysis.getInterimFields(): if field.get("wide", False): andict["interims"][field["keyword"]] = field # Interims from calculation for field in calculation.getInterimFields(): if field["keyword"] not in andict["interims"].keys() \ and field.get("wide", False): andict["interims"][field["keyword"]] = field if andict["interims"]: outdict[analysis.getKeyword()] = andict return outdict
python
{ "resource": "" }
q21462
editableFields
train
def editableFields(self, instance, visible_only=False):
    """Returns a list of editable fields for the given instance
    """
    ret = []
    portal = getToolByName(instance, 'portal_url').getPortalObject()
    for field in self.fields():
        if field.writeable(instance, debug=False) and \
           (not visible_only or
                field.widget.isVisible(
                    instance, mode='edit', field=field) != 'invisible') and \
           field.widget.testCondition(instance.aq_parent, portal, instance):
            ret.append(field)
    return ret
python
{ "resource": "" }
q21463
isVisible
train
def isVisible(self, instance, mode='view', default="visible", field=None): """decide if a field is visible in a given mode -> 'state'. """ # Emulate Products.Archetypes.Widget.TypesWidget#isVisible first vis_dic = getattr(aq_base(self), 'visible', _marker) state = default if vis_dic is _marker: return state if type(vis_dic) is DictType: state = vis_dic.get(mode, state) elif not vis_dic: state = 'invisible' elif vis_dic < 0: state = 'hidden' # Our custom code starts here if not field: return state # Look for visibility from the adapters provided by IATWidgetVisibility adapters = sorted(getAdapters([instance], IATWidgetVisibility), key=lambda adapter: getattr(adapter[1], "sort", 1000), reverse=True) for adapter_name, adapter in adapters: if field.getName() not in getattr(adapter, "field_names", []): # Omit those adapters that are not suitable for this field continue adapter_state = adapter(instance, mode, field, state) adapter_name = adapter.__class__.__name__ logger.info("IATWidgetVisibility rule {} for {}.{} ({}): {} -> {}" .format(adapter_name, instance.id, field.getName(), mode, state, adapter_state)) if adapter_state == state: continue return adapter_state return state
python
{ "resource": "" }
q21464
skip
train
def skip(instance, action, peek=False, unskip=False):
    """Returns True if the transition is to be SKIPPED

    peek - True just checks the value, does not set.
    unskip - remove skip key (for manual overrides).

    Called with only (instance, action_id), this will set the request
    variable preventing the cascades from re-transitioning the object and
    return None.
    """
    uid = callable(instance.UID) and instance.UID() or instance.UID
    skipkey = "%s_%s" % (uid, action)
    if 'workflow_skiplist' not in instance.REQUEST:
        if not peek and not unskip:
            instance.REQUEST['workflow_skiplist'] = [skipkey, ]
    else:
        if skipkey in instance.REQUEST['workflow_skiplist']:
            if unskip:
                instance.REQUEST['workflow_skiplist'].remove(skipkey)
            else:
                return True
        else:
            if not peek and not unskip:
                instance.REQUEST["workflow_skiplist"].append(skipkey)
python
{ "resource": "" }
q21465
call_workflow_event
train
def call_workflow_event(instance, event, after=True):
    """Calls the instance's workflow event
    """
    if not event.transition:
        return False

    portal_type = instance.portal_type
    wf_module = _load_wf_module('{}.events'.format(portal_type.lower()))
    if not wf_module:
        return False

    # Inspect if a before_/after_<transition_id> function exists in the module
    prefix = after and "after" or "before"
    func_name = "{}_{}".format(prefix, event.transition.id)
    func = getattr(wf_module, func_name, False)
    if not func:
        return False

    logger.info('WF event: {0}.events.{1}'
                .format(portal_type.lower(), func_name))
    func(instance)
    return True
python
{ "resource": "" }
q21466
get_workflow_actions
train
def get_workflow_actions(obj):
    """Compile a list of possible workflow transitions for this object
    """

    def translate(id):
        return t(PMF(id + "_transition_title"))

    transids = getAllowedTransitions(obj)
    actions = [{'id': it, 'title': translate(it)} for it in transids]
    return actions
python
{ "resource": "" }
q21467
get_review_history_statuses
train
def get_review_history_statuses(instance, reverse=False):
    """Returns a list with the statuses of the instance from the
    review_history
    """
    review_history = getReviewHistory(instance, reverse=reverse)
    return map(lambda event: event["review_state"], review_history)
python
{ "resource": "" }
q21468
get_prev_status_from_history
train
def get_prev_status_from_history(instance, status=None):
    """Returns the previous status of the object. If status is set, returns
    the previous status before the object reached the status passed in. If
    the instance has reached the status passed in more than once, only the
    last one is considered.
    """
    target = status or api.get_workflow_status_of(instance)
    history = getReviewHistory(instance, reverse=True)
    history = map(lambda event: event["review_state"], history)
    if target not in history or history.index(target) == len(history) - 1:
        return None
    return history[history.index(target) + 1]
python
{ "resource": "" }
q21469
in_state
train
def in_state(obj, states, stateflowid='review_state'):
    """Returns whether the object passed in matches any of the states
    passed in
    """
    if not states:
        return False
    obj_state = getCurrentState(obj, stateflowid=stateflowid)
    return obj_state in states
python
{ "resource": "" }
q21470
getTransitionDate
train
def getTransitionDate(obj, action_id, return_as_datetime=False):
    """Returns the date of the action for the object. Sometimes we need this
    date in DateTime format, which is why the return_as_datetime param was
    added.
    """
    review_history = getReviewHistory(obj)
    for event in review_history:
        if event.get('action') == action_id:
            evtime = event.get('time')
            if return_as_datetime:
                return evtime
            if evtime:
                value = ulocalized_time(evtime, long_format=True,
                                        time_only=False, context=obj)
                return value
    return None
python
{ "resource": "" }
q21471
guard_handler
train
def guard_handler(instance, transition_id):
    """Generic workflow guard handler that returns true if the transition_id
    passed in can be performed on the instance passed in.

    This function is called automatically by a Script (Python) located at
    bika/lims/skins/guard_handler.py, which in turn is fired by Zope when an
    expression like "python:here.guard_handler('<transition_id>')" is set to
    any given guard (used by default in all bika's DC Workflow guards).

    Walks through bika.lims.workflow.<obj_type>.guards and looks for a
    function that matches 'guard_<transition_id>'. If found, calls the
    function and returns its value (true or false). If not found, returns
    True by default.

    :param instance: the object for which the transition_id has to be
        evaluated
    :param transition_id: the id of the transition
    :type instance: ATContentType
    :type transition_id: string
    :return: true if the transition can be performed on the passed in
        instance
    :rtype: bool
    """
    if not instance:
        return True

    clazz_name = instance.portal_type
    # Inspect if bika.lims.workflow.<clazzname>.<guards> module exists
    wf_module = _load_wf_module('{0}.guards'.format(clazz_name.lower()))
    if not wf_module:
        return True

    # Inspect if guard_<transition_id> function exists in the above module
    key = 'guard_{0}'.format(transition_id)
    guard = getattr(wf_module, key, False)
    if not guard:
        return True

    # logger.info('{0}.guards.{1}'.format(clazz_name.lower(), key))
    return guard(instance)
python
{ "resource": "" }
q21472
_load_wf_module
train
def _load_wf_module(module_relative_name):
    """Loads a python module based on the module relative name passed in.

    At first, tries to get the module from sys.modules. If not found there,
    the function tries to load it by using importlib. Returns None if no
    module is found or importlib is unable to load it because of errors.
    E.g. _load_wf_module('sample.events') will try to load the module
    'bika.lims.workflow.sample.events'

    :param module_relative_name: relative name of the module to be loaded
    :type module_relative_name: string
    :return: the module
    :rtype: module
    """
    if not module_relative_name:
        return None
    if not isinstance(module_relative_name, basestring):
        return None

    rootmodname = __name__
    modulekey = '{0}.{1}'.format(rootmodname, module_relative_name)
    if modulekey in sys.modules:
        return sys.modules.get(modulekey, None)

    # Try to load the module recursively
    import importlib
    modname = None
    tokens = module_relative_name.split('.')
    for part in tokens:
        modname = '.'.join([modname, part]) if modname else part
        try:
            _module = importlib.import_module('.' + modname,
                                              package=rootmodname)
            if not _module:
                return None
        except Exception:
            return None
    return sys.modules.get(modulekey, None)
python
{ "resource": "" }
q21473
push_reindex_to_actions_pool
train
def push_reindex_to_actions_pool(obj, idxs=None):
    """Push a reindex job to the actions handler pool
    """
    indexes = idxs and idxs or []
    pool = ActionHandlerPool.get_instance()
    pool.push(obj, "reindex", success=True, idxs=indexes)
python
{ "resource": "" }
q21474
ActionHandlerPool.push
train
def push(self, instance, action, success, idxs=_marker):
    """Adds an instance into the pool, to be reindexed on resume
    """
    uid = api.get_uid(instance)
    info = self.objects.get(uid, {})
    idx = [] if idxs is _marker else idxs
    info[action] = {'success': success, 'idxs': idx}
    self.objects[uid] = info
python
{ "resource": "" }
q21475
ActionHandlerPool.succeed
train
def succeed(self, instance, action):
    """Returns if the task for the instance took place successfully
    """
    uid = api.get_uid(instance)
    return self.objects.get(uid, {}).get(action, {}).get('success', False)
python
{ "resource": "" }
q21476
ActionHandlerPool.resume
train
def resume(self):
    """Resumes the pool and reindexes all objects processed
    """
    self.num_calls -= 1
    if self.num_calls > 0:
        return

    logger.info("Resume actions for {} objects".format(len(self)))

    # Fetch the objects from the pool
    processed = list()
    for brain in api.search(dict(UID=self.objects.keys()), UID_CATALOG):
        uid = api.get_uid(brain)
        if uid in processed:
            # This object has been processed already, do nothing
            continue

        # Reindex the object
        obj = api.get_object(brain)
        idxs = self.get_indexes(uid)
        idxs_str = idxs and ', '.join(idxs) or "-- All indexes --"
        logger.info("Reindexing {}: {}".format(obj.getId(), idxs_str))
        obj.reindexObject(idxs=idxs)
        processed.append(uid)

    # Cleanup the pool
    logger.info("Objects processed: {}".format(len(processed)))
    self.objects = collections.OrderedDict()
python
{ "resource": "" }
q21477
WorkflowMenu.getMenuItems
train
def getMenuItems(self, context, request):
    """Overrides the workflow actions menu displayed top right in the
    object's view. Displays the current state of the object, as well as a
    list with the actions that can be performed.

    The option "Advanced..." is not displayed and the list is populated
    with all allowed transitions for the object.
    """
    actions = super(WorkflowMenu, self).getMenuItems(context, request)
    # Remove status history menu item ('Advanced...')
    actions = [action for action in actions
               if not action['action'].endswith('/content_status_history')]
    return actions
python
{ "resource": "" }
q21478
WorkflowActionReceiveAdapter.is_auto_partition_required
train
def is_auto_partition_required(self, brain_or_object):
    """Returns whether the passed in object needs to be partitioned
    """
    obj = api.get_object(brain_or_object)
    if not IAnalysisRequest.providedBy(obj):
        return False
    template = obj.getTemplate()
    return template and template.getAutoPartition()
python
{ "resource": "" }
q21479
WorkflowActionInvalidateAdapter.notify_ar_retract
train
def notify_ar_retract(self, sample): """Sends an email notification to sample's client contact if the sample passed in has a retest associated """ retest = sample.getRetest() if not retest: logger.warn("No retest found for {}. And it should!" .format(api.get_id(sample))) return # Email fields sample_id = api.get_id(sample) subject = t(_("Erroneous result publication from {}").format(sample_id)) emails_lab = self.get_lab_managers_formatted_emails() emails_sample = self.get_sample_contacts_formatted_emails(sample) recipients = list(set(emails_lab + emails_sample)) msg = MIMEMultipart("related") msg["Subject"] = subject msg["From"] = self.get_laboratory_formatted_email() msg["To"] = ", ".join(recipients) body = self.get_email_body(sample) msg_txt = MIMEText(safe_unicode(body).encode('utf-8'), _subtype='html') msg.preamble = 'This is a multi-part MIME message.' msg.attach(msg_txt) # Send the email try: host = api.get_tool("MailHost") host.send(msg.as_string(), immediate=True) except Exception as err_msg: message = _("Unable to send an email to alert lab " "client contacts that the Sample has been " "retracted: ${error}", mapping={'error': safe_unicode(err_msg)}) self.context.plone_utils.addPortalMessage(message, 'warning')
python
{ "resource": "" }
q21480
WorkflowActionInvalidateAdapter.get_email_body
train
def get_email_body(self, sample):
    """Returns the email body text
    """
    retest = sample.getRetest()
    lab_address = api.get_bika_setup().laboratory.getPrintAddress()
    setup = api.get_setup()
    body = Template(setup.getEmailBodySampleInvalidation())\
        .safe_substitute(
            dict(sample_link=self.get_html_link(sample),
                 retest_link=self.get_html_link(retest),
                 sample_id=api.get_id(sample),
                 retest_id=api.get_id(retest),
                 lab_address="<br/>".join(lab_address)))
    return body
python
{ "resource": "" }
q21481
WorkflowActionInvalidateAdapter.get_laboratory_formatted_email
train
def get_laboratory_formatted_email(self):
    """Returns the laboratory email formatted
    """
    lab = api.get_bika_setup().laboratory
    return self.get_formatted_email((lab.getName(), lab.getEmailAddress()))
python
{ "resource": "" }
q21482
WorkflowActionInvalidateAdapter.get_lab_managers_formatted_emails
train
def get_lab_managers_formatted_emails(self):
    """Returns a list with lab managers formatted emails
    """
    users = api.get_users_by_roles("LabManager")
    users = map(lambda user: (user.getProperty("fullname"),
                              user.getProperty("email")), users)
    return map(self.get_formatted_email, users)
python
{ "resource": "" }
q21483
WorkflowActionInvalidateAdapter.get_contact_formatted_email
train
def get_contact_formatted_email(self, contact):
    """Returns a string with the formatted email for the given contact
    """
    contact_name = contact.Title()
    contact_email = contact.getEmailAddress()
    return self.get_formatted_email((contact_name, contact_email))
python
{ "resource": "" }
q21484
WorkflowActionInvalidateAdapter.get_sample_contacts_formatted_emails
train
def get_sample_contacts_formatted_emails(self, sample):
    """Returns a list with the formatted emails from sample contacts
    """
    contacts = list(set([sample.getContact()] + sample.getCCContact()))
    return map(self.get_contact_formatted_email, contacts)
python
{ "resource": "" }
q21485
WorkflowActionInvalidateAdapter.get_html_link
train
def get_html_link(self, obj):
    """Returns an html formatted link for the given object
    """
    return "<a href='{}'>{}</a>".format(api.get_url(obj), api.get_id(obj))
python
{ "resource": "" }
q21486
WorkflowActionPrintSampleAdapter.set_printed_time
train
def set_printed_time(self, sample):
    """Updates the printed time of the last results report from the sample
    """
    if api.get_workflow_status_of(sample) != "published":
        return False
    reports = sample.objectValues("ARReport")
    reports = sorted(reports, key=lambda report: report.getDatePublished())
    last_report = reports[-1]
    if not last_report.getDatePrinted():
        last_report.setDatePrinted(DateTime())
        sample.reindexObject(idxs=["getPrinted"])
    return True
python
{ "resource": "" }
q21487
WorkflowActionSampleAdapter.set_sampler_info
train
def set_sampler_info(self, sample):
    """Updates the Sampler and the Sample Date with the values provided in
    the request. If neither Sampler nor SampleDate are present in the
    request, returns False
    """
    if sample.getSampler() and sample.getDateSampled():
        # Sampler and Date Sampled already set. This is correct
        return True
    sampler = self.get_form_value("Sampler", sample, sample.getSampler())
    sampled = self.get_form_value("getDateSampled", sample,
                                  sample.getDateSampled())
    if not all([sampler, sampled]):
        return False
    sample.setSampler(sampler)
    sample.setDateSampled(DateTime(sampled))
    return True
python
{ "resource": "" }
q21488
WorkflowActionPreserveAdapter.set_preserver_info
train
def set_preserver_info(self, sample):
    """Updates the Preserver and the Date Preserved with the values provided
    in the request. If neither Preserver nor DatePreserved are present in
    the request, returns False
    """
    if sample.getPreserver() and sample.getDatePreserved():
        # Preserver and Date Preserved already set. This is correct
        return True
    preserver = self.get_form_value("Preserver", sample,
                                    sample.getPreserver())
    preserved = self.get_form_value("getDatePreserved", sample,
                                    sample.getDatePreserved())
    if not all([preserver, preserved]):
        return False
    sample.setPreserver(preserver)
    sample.setDatePreserved(DateTime(preserved))
    return True
python
{ "resource": "" }
q21489
WorkflowActionScheduleSamplingAdapter.set_sampling_info
train
def set_sampling_info(self, sample):
    """Updates the scheduled Sampling sampler and the Sampling Date with the
    values provided in the request. If neither Sampling sampler nor
    Sampling Date are present in the request, returns False
    """
    if sample.getScheduledSamplingSampler() and sample.getSamplingDate():
        return True
    sampler = self.get_form_value("getScheduledSamplingSampler", sample,
                                  sample.getScheduledSamplingSampler())
    sampled = self.get_form_value("getSamplingDate", sample,
                                  sample.getSamplingDate())
    if not all([sampler, sampled]):
        return False
    sample.setScheduledSamplingSampler(sampler)
    sample.setSamplingDate(DateTime(sampled))
    return True
python
{ "resource": "" }
q21490
WorkflowActionSaveAnalysesAdapter.is_hidden
train
def is_hidden(self, service):
    """Returns whether the request Hidden param for the given obj is True
    """
    uid = api.get_uid(service)
    hidden_ans = self.request.form.get("Hidden", {})
    return hidden_ans.get(uid, "") == "on"
python
{ "resource": "" }
q21491
WorkflowActionSaveAnalysesAdapter.get_specs
train
def get_specs(self, service):
    """Returns the analysis specs available in the request for the given uid
    """
    uid = api.get_uid(service)
    keyword = service.getKeyword()
    specs = ResultsRangeDict(keyword=keyword, uid=uid).copy()
    for key in specs.keys():
        specs_value = self.request.form.get(key, [{}])[0].get(uid, None)
        specs[key] = specs_value or specs.get(key)
    return specs
python
{ "resource": "" }
q21492
DefaultReferenceWidgetVocabulary.search_fields
train
def search_fields(self):
    """Returns the object field names to search against
    """
    search_fields = self.request.get("search_fields", None)
    if not search_fields:
        return []
    search_fields = json.loads(_u(search_fields))
    return search_fields
python
{ "resource": "" }
q21493
DefaultReferenceWidgetVocabulary.search_term
train
def search_term(self):
    """Returns the search term
    """
    search_term = _c(self.request.get("searchTerm", ""))
    return search_term.lower().strip()
python
{ "resource": "" }
q21494
DefaultReferenceWidgetVocabulary.get_query_from_request
train
def get_query_from_request(self, name):
    """Returns the query inferred from the request
    """
    query = self.request.get(name, "{}")
    # json.loads does unicode conversion, which will fail in the catalog
    # search for some cases. So we need to convert the strings to utf8
    # https://github.com/senaite/senaite.core/issues/443
    query = json.loads(query)
    return self.to_utf8(query)
python
{ "resource": "" }
q21495
DefaultReferenceWidgetVocabulary.get_raw_query
train
def get_raw_query(self):
    """Returns the raw query to use for current search, based on the
    base query + update query
    """
    query = self.base_query.copy()
    search_query = self.search_query.copy()
    query.update(search_query)

    # Add sorting criteria
    sorting = self.resolve_sorting(query)
    query.update(sorting)

    # Check if sort_on is an index and if is sortable. Otherwise, assume
    # the sorting must be done manually
    catalog = api.get_tool(self.catalog_name)
    sort_on = query.get("sort_on", None)
    if sort_on and not self.is_sortable_index(sort_on, catalog):
        del(query["sort_on"])

    return query
python
{ "resource": "" }
q21496
DefaultReferenceWidgetVocabulary.resolve_sorting
train
def resolve_sorting(self, query):
    """Resolves the sorting criteria for the given query
    """
    sorting = {}

    # Sort on
    sort_on = query.get("sidx", None)
    sort_on = sort_on or query.get("sort_on", None)
    sort_on = sort_on == "Title" and "sortable_title" or sort_on
    if sort_on:
        sorting["sort_on"] = sort_on

    # Sort order
    sort_order = query.get("sord", None)
    sort_order = sort_order or query.get("sort_order", None)
    if sort_order in ["desc", "reverse", "rev", "descending"]:
        sorting["sort_order"] = "descending"
    else:
        sorting["sort_order"] = "ascending"

    # Sort limit
    sort_limit = api.to_int(query.get("limit", 30), default=30)
    if sort_limit:
        sorting["sort_limit"] = sort_limit

    return sorting
python
{ "resource": "" }
q21497
DefaultReferenceWidgetVocabulary.is_sortable_index
train
def is_sortable_index(self, index_name, catalog):
    """Returns whether the index is sortable
    """
    index = self.get_index(index_name, catalog)
    if not index:
        return False
    return index.meta_type in ["FieldIndex", "DateIndex"]
python
{ "resource": "" }
q21498
DefaultReferenceWidgetVocabulary.get_index
train
def get_index(self, field_name, catalog):
    """Returns the index of the catalog for the given field_name, if any
    """
    index = catalog.Indexes.get(field_name, None)
    if not index and field_name == "Title":
        # Legacy
        return self.get_index("sortable_title", catalog)
    return index
python
{ "resource": "" }
q21499
DefaultReferenceWidgetVocabulary.search
train
def search(self, query, search_term, search_field, catalog):
    """Performs a search against the catalog and returns the brains
    """
    logger.info("Reference Widget Catalog: {}".format(catalog.id))
    if not search_term:
        return catalog(query)

    index = self.get_index(search_field, catalog)
    if not index:
        logger.warn("*** Index not found: '{}'".format(search_field))
        return []

    meta = index.meta_type
    if meta == "TextIndexNG3":
        query[index.id] = "{}*".format(search_term)
    elif meta == "ZCTextIndex":
        logger.warn("*** Field '{}' ({}). Better use TextIndexNG3"
                    .format(meta, search_field))
        query[index.id] = "{}*".format(search_term)
    elif meta in ["FieldIndex", "KeywordIndex"]:
        logger.warn("*** Field '{}' ({}). Better use TextIndexNG3"
                    .format(meta, search_field))
        query[index.id] = search_term
    else:
        logger.warn("*** Index '{}' ({}) not supported"
                    .format(search_field, meta))
        return []

    logger.info("Reference Widget Query: {}".format(repr(query)))
    return catalog(query)
python
{ "resource": "" }