| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q21500
|
get_mapped_permissions_for
|
train
|
def get_mapped_permissions_for(brain_or_object):
"""Get the mapped permissions for the given object
A mapped permission is one that is used in the object.
Each permission string, e.g. "senaite.core: Field: Edit Analysis Remarks" is
translated by the function `AccessControl.Permission.pname` to a valid
attribute name:
>>> from bika.lims.permissions import FieldEditAnalysisResult
>>> from AccessControl.Permission import pname
>>> pname(FieldEditAnalysisResult)
_Field__Edit_Result_Permission
This attribute is looked up in the object by `getPermissionMapping`:
>>> from AccessControl.PermissionMapping import getPermissionMapping
>>> getPermissionMapping(FieldEditAnalysisResult, wrapper)
("Manager", "Sampler")
Therefore, only those permissions which have roles mapped on the object
or by objects within the acquisition chain are considered.
Code extracted from `IRoleManager.manage_getUserRolesAndPermissions`
:param brain_or_object: Catalog brain or object
:returns: List of permissions
"""
obj = api.get_object(brain_or_object)
mapping = obj.manage_getPermissionMapping()
return map(lambda item: item["permission_name"], mapping)
|
python
|
{
"resource": ""
}
|
q21501
|
get_allowed_permissions_for
|
train
|
def get_allowed_permissions_for(brain_or_object, user=None):
"""Get the allowed permissions for the given object
Code extracted from `IRoleManager.manage_getUserRolesAndPermissions`
:param brain_or_object: Catalog brain or object
:param user: A user ID, user object or None (for the current user)
:returns: List of allowed permissions
"""
allowed = []
user = get_user(user)
obj = api.get_object(brain_or_object)
for permission in get_mapped_permissions_for(brain_or_object):
if user.has_permission(permission, obj):
allowed.append(permission)
return allowed
|
python
|
{
"resource": ""
}
|
q21502
|
get_disallowed_permissions_for
|
train
|
def get_disallowed_permissions_for(brain_or_object, user=None):
"""Get the disallowed permissions for the given object
Code extracted from `IRoleManager.manage_getUserRolesAndPermissions`
:param brain_or_object: Catalog brain or object
:param user: A user ID, user object or None (for the current user)
:returns: List of disallowed permissions
"""
disallowed = []
user = get_user(user)
obj = api.get_object(brain_or_object)
for permission in get_mapped_permissions_for(brain_or_object):
if not user.has_permission(permission, obj):
disallowed.append(permission)
return disallowed
|
python
|
{
"resource": ""
}
|
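Taken together, the three helpers above partition an object's mapped permissions by the user's access. A minimal pure-Python sketch of that allowed/disallowed split, with `mapped` and `granted` as toy stand-ins for the catalog-backed lookups (assumptions, not the real API):

mapped = ["View", "Modify portal content", "senaite.core: Field: Edit Result"]
granted = {"View", "senaite.core: Field: Edit Result"}

allowed = [p for p in mapped if p in granted]
disallowed = [p for p in mapped if p not in granted]

# every mapped permission falls in exactly one of the two lists
assert allowed == ["View", "senaite.core: Field: Edit Result"]
assert disallowed == ["Modify portal content"]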
q21503
|
check_permission
|
train
|
def check_permission(permission, brain_or_object):
"""Check whether the security context allows the given permission on
the given brain or object.
N.B.: This also includes acquired permissions
:param permission: Permission name
:param brain_or_object: Catalog brain or object
:returns: True if the permission is granted
"""
sm = get_security_manager()
obj = api.get_object(brain_or_object)
return sm.checkPermission(permission, obj) == 1
|
python
|
{
"resource": ""
}
|
q21504
|
get_permissions_for_role
|
train
|
def get_permissions_for_role(role, brain_or_object):
"""Return the permissions of the role which are granted on the object
Code extracted from `IRoleManager.permissionsOfRole`
:param role: The role to check the permission
:param brain_or_object: Catalog brain or object
:returns: List of permissions of the role
"""
obj = api.get_object(brain_or_object)
# Raise an error if the role is invalid
valid_roles = get_valid_roles_for(obj)
if role not in valid_roles:
raise ValueError("The Role '{}' is invalid.".format(role))
out = []
for item in obj.ac_inherited_permissions(1):
name, value = item[:2]
# Permission maps a named permission to a set of attribute names
permission = Permission(name, value, obj)
if role in permission.getRoles():
out.append(name)
return out
|
python
|
{
"resource": ""
}
|
q21505
|
get_roles_for_permission
|
train
|
def get_roles_for_permission(permission, brain_or_object):
"""Return the roles of the permission that is granted on the object
Code extracted from `IRoleManager.rolesOfPermission`
:param permission: The permission to get the roles
:param brain_or_object: Catalog brain or object
:returns: List of roles having the permission
"""
obj = api.get_object(brain_or_object)
valid_roles = get_valid_roles_for(obj)
for item in obj.ac_inherited_permissions(1):
name, value = item[:2]
# found the requested permission
if name == permission:
# Permission maps a named permission to a set of attribute names
permission = Permission(name, value, obj)
roles = permission.getRoles()
# return only valid roles that have the permission granted
return filter(lambda r: r in valid_roles, roles)
# Raise an error if the permission is invalid
raise ValueError("The permission {} is invalid.".format(permission))
|
python
|
{
"resource": ""
}
|
q21506
|
get_local_roles_for
|
train
|
def get_local_roles_for(brain_or_object, user=None):
"""Get the local defined roles on the context
Code extracted from `IRoleManager.get_local_roles_for_userid`
:param brain_or_object: Catalog brain or object
:param user: A user ID, user object or None (for the current user)
:returns: List of granted local roles on the given object
"""
user_id = get_user_id(user)
obj = api.get_object(brain_or_object)
return sorted(obj.get_local_roles_for_userid(user_id))
|
python
|
{
"resource": ""
}
|
q21507
|
grant_local_roles_for
|
train
|
def grant_local_roles_for(brain_or_object, roles, user=None):
"""Grant local roles for the object
Code extracted from `IRoleManager.manage_addLocalRoles`
:param brain_or_object: Catalog brain or object
:param user: A user ID, user object or None (for the current user)
:param roles: The local roles to grant for the current user
"""
user_id = get_user_id(user)
obj = api.get_object(brain_or_object)
if isinstance(roles, basestring):
roles = [roles]
obj.manage_addLocalRoles(user_id, roles)
return get_local_roles_for(brain_or_object)
|
python
|
{
"resource": ""
}
|
q21508
|
revoke_local_roles_for
|
train
|
def revoke_local_roles_for(brain_or_object, roles, user=None):
"""Revoke local roles for the object
Code extracted from `IRoleManager.manage_setLocalRoles`
:param brain_or_object: Catalog brain or object
:param roles: The local roles to revoke for the current user
:param user: A user ID, user object or None (for the current user)
"""
user_id = get_user_id(user)
obj = api.get_object(brain_or_object)
valid_roles = get_valid_roles_for(obj)
to_grant = list(get_local_roles_for(obj))
if isinstance(roles, basestring):
roles = [roles]
for role in roles:
if role in to_grant:
if role not in valid_roles:
raise ValueError("The Role '{}' is invalid.".format(role))
# Remove the role
to_grant.remove(role)
if len(to_grant) > 0:
obj.manage_setLocalRoles(user_id, to_grant)
else:
obj.manage_delLocalRoles([user_id])
return get_local_roles_for(brain_or_object)
|
python
|
{
"resource": ""
}
|
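The grant/revoke pair keeps an object's local roles for a user in sync: granting appends via `manage_addLocalRoles`, revoking rebuilds the remaining set, or deletes the entry when nothing is left. A small self-contained sketch of the revoke logic, with a plain dict standing in for the object's local-role storage (an assumption, not the real Zope API):

local_roles = {"analyst1": ["Analyst", "Owner"]}

def revoke(user_id, roles, valid_roles=("Analyst", "Owner", "Sampler")):
    to_grant = list(local_roles.get(user_id, []))
    for role in roles:
        if role in to_grant:
            if role not in valid_roles:
                raise ValueError("The Role '{}' is invalid.".format(role))
            to_grant.remove(role)
    if to_grant:
        local_roles[user_id] = to_grant   # stands in for manage_setLocalRoles
    else:
        local_roles.pop(user_id, None)    # stands in for manage_delLocalRoles
    return sorted(local_roles.get(user_id, []))

assert revoke("analyst1", ["Owner"]) == ["Analyst"]
assert revoke("analyst1", ["Analyst"]) == []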
q21509
|
grant_permission_for
|
train
|
def grant_permission_for(brain_or_object, permission, roles, acquire=0):
"""Grant the permission for the object to the defined roles
Code extracted from `IRoleManager.manage_permission`
:param brain_or_object: Catalog brain or object
:param permission: The permission to be granted
:param roles: The roles the permission to be granted to
:param acquire: Flag to acquire the permission
"""
obj = api.get_object(brain_or_object)
valid_roles = get_valid_roles_for(obj)
to_grant = list(get_roles_for_permission(permission, obj))
if isinstance(roles, basestring):
roles = [roles]
for role in roles:
if role not in to_grant:
if role not in valid_roles:
raise ValueError("The Role '{}' is invalid.".format(role))
# Append the role
to_grant.append(role)
manage_permission_for(obj, permission, to_grant, acquire=acquire)
|
python
|
{
"resource": ""
}
|
q21510
|
manage_permission_for
|
train
|
def manage_permission_for(brain_or_object, permission, roles, acquire=0):
"""Change the settings for the given permission.
Code extracted from `IRoleManager.manage_permission`
:param brain_or_object: Catalog brain or object
:param permission: The permission to be granted
:param roles: The roles the permission to be granted to
:param acquire: Flag to acquire the permission
"""
obj = api.get_object(brain_or_object)
if isinstance(roles, basestring):
roles = [roles]
for item in obj.ac_inherited_permissions(1):
name, value = item[:2]
if name == permission:
permission = Permission(name, value, obj)
if acquire:
roles = list(roles)
else:
roles = tuple(roles)
permission.setRoles(roles)
return
# Raise an error if the permission is invalid
raise ValueError("The permission {} is invalid.".format(permission))
|
python
|
{
"resource": ""
}
|
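Note the `acquire` flag in `manage_permission_for`: as the code above shows, Zope encodes "acquire permission settings" by storing the roles as a list, while a tuple means no acquisition. A toy sketch of that convention, where the dict is a stand-in for the persistent permission attribute (an assumption):

def set_roles(storage, permission, roles, acquire=0):
    # list => roles plus acquired settings; tuple => these roles only
    storage[permission] = list(roles) if acquire else tuple(roles)

perms = {}
set_roles(perms, "View", ["Manager", "Sampler"], acquire=1)
set_roles(perms, "Modify portal content", ["Manager"], acquire=0)
assert isinstance(perms["View"], list)                     # acquired
assert isinstance(perms["Modify portal content"], tuple)   # not acquired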
q21511
|
PrintForm.getCSS
|
train
|
def getCSS(self):
""" Returns the css style to be used for the current template.
If the selected template is 'default.pt', this method will
return the content from 'default.css'. If no css file is found
for the current template, an empty string is returned
"""
template = self.request.get('template', self._DEFAULT_TEMPLATE)
content = ''
if template.find(':') >= 0:
prefix, template = template.split(':')
resource = queryResourceDirectory(
self._TEMPLATES_ADDON_DIR, prefix)
css = '{0}.css'.format(template[:-3])
if css in resource.listDirectory():
content = resource.readFile(css)
else:
this_dir = os.path.dirname(os.path.abspath(__file__))
templates_dir = os.path.join(this_dir, self._TEMPLATES_DIR)
path = '%s/%s.css' % (templates_dir, template[:-3])
with open(path, 'r') as content_file:
content = content_file.read()
return content
|
python
|
{
"resource": ""
}
|
q21512
|
PrintForm.pdfFromPOST
|
train
|
def pdfFromPOST(self):
"""
Returns the PDF for the printed sampling rounds
"""
html = self.request.form.get('html')
style = self.request.form.get('style')
reporthtml = "<html><head>%s</head><body><div id='report'>%s</div></body></html>" % (style, html)
return self.printFromHTML(safe_unicode(reporthtml).encode('utf-8'))
|
python
|
{
"resource": ""
}
|
q21513
|
BikaSetup.getAnalysisServicesVocabulary
|
train
|
def getAnalysisServicesVocabulary(self):
"""
Get all active Analysis Services from Bika Setup and return them as Display List.
"""
bsc = getToolByName(self, 'bika_setup_catalog')
brains = bsc(portal_type='AnalysisService',
is_active=True)
items = [(b.UID, b.Title) for b in brains]
items.insert(0, ("", ""))
items.sort(lambda x, y: cmp(x[1], y[1]))
return DisplayList(list(items))
|
python
|
{
"resource": ""
}
|
q21514
|
BikaSetup.getPrefixFor
|
train
|
def getPrefixFor(self, portal_type):
"""Return the prefix for a portal_type.
If not found, simply uses the portal_type itself
"""
prefix = [p for p in self.getIDFormatting() if p['portal_type'] == portal_type]
if prefix:
return prefix[0]['prefix']
else:
return portal_type
|
python
|
{
"resource": ""
}
|
q21515
|
BikaSetup.getRejectionReasonsItems
|
train
|
def getRejectionReasonsItems(self):
"""Return the list of predefined rejection reasons
"""
reasons = self.getRejectionReasons()
if not reasons:
return []
reasons = reasons[0]
keys = filter(lambda key: key != "checkbox", reasons.keys())
return map(lambda key: reasons[key], sorted(keys)) or []
|
python
|
{
"resource": ""
}
|
q21516
|
AnalysisRequestRejectBase.get_rejection_reasons
|
train
|
def get_rejection_reasons(self, keyword=None):
"""
Returns a list with the rejection reasons as strings
:param keyword: set of rejection reasons to be retrieved.
Possible values are:
- 'selected': Get, amongst the set of predefined reasons, the ones selected
- 'other': Get the user free-typed reason for rejection
- None: Get all rejection reasons
:return: list of rejection reasons as strings or an empty list
"""
keys = ['selected', 'other']
if keyword is None:
return sum(map(self.get_rejection_reasons, keys), [])
if keyword not in keys:
return []
rejection_reasons = self.context.getRejectionReasons()
rejection_reasons = rejection_reasons and rejection_reasons[0] or {}
if keyword == 'other':
return rejection_reasons.get(keyword, '') and [rejection_reasons.get(keyword, '')] or []
return rejection_reasons.get(keyword, [])
|
python
|
{
"resource": ""
}
|
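Both rejection-reason helpers assume the RejectionReasons field stores a single-item list holding a dict with a 'checkbox' flag, a 'selected' list of predefined reasons and an 'other' free-text entry. A small sketch of that shape and the extraction logic (the field layout is inferred from the code above; the values are invented):

rejection_reasons = [{
    "checkbox": "on",
    "selected": ["Sample container broken", "Not enough volume"],
    "other": "Labels unreadable",
}]

reasons = rejection_reasons[0]
selected = reasons.get("selected", [])
other = [reasons["other"]] if reasons.get("other") else []
assert selected + other == ["Sample container broken",
                            "Not enough volume", "Labels unreadable"]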
q21517
|
BikaCatalogTool.softClearFindAndRebuild
|
train
|
def softClearFindAndRebuild(self):
"""
Empties the catalog, then finds all contentish objects querying the
uid_catalog and reindexes them.
This may take a long time and will not care about missing
objects in uid_catalog.
"""
logger.info('Soft cleaning and rebuilding %s...' % self.id)
try:
at = getToolByName(self, 'archetype_tool')
types = [k for k, v in at.catalog_map.items()
if self.id in v]
self.counter = 0
self.manage_catalogClear()
# Getting UID catalog
portal = getToolByName(self, 'portal_url').getPortalObject()
uid_c = getToolByName(portal, 'uid_catalog')
brains = uid_c(portal_type=types)
self.total = len(brains)
for brain in brains:
obj = brain.getObject()
self.catalog_object(
obj, idxs=self.indexes(),
update_metadata=True)
self.counter += 1
if self.counter % 100 == 0:
logger.info(
'Progress: {}/{} objects have been cataloged for {}.'
.format(self.counter, self.total, self.id))
if self.counter % 1000 == 0:
transaction.commit()
logger.info(
'{0} items processed.'
.format(self.counter))
transaction.commit()
logger.info(
'{0} items processed.'
.format(self.counter))
except:
logger.error(traceback.format_exc())
e = sys.exc_info()
logger.error(
"Unable to clean and rebuild %s due to: %s" % (self.id, e))
logger.info('%s cleaned and rebuilt' % self.id)
|
python
|
{
"resource": ""
}
|
q21518
|
ARResultsInterpretationView.get_text
|
train
|
def get_text(self, department, mode="raw"):
"""Returns the text saved for the selected department
"""
row = self.context.getResultsInterpretationByDepartment(department)
rt = RichTextValue(row.get("richtext", ""), "text/plain", "text/html")
if mode == "output":
return rt.output
return rt.raw
|
python
|
{
"resource": ""
}
|
q21519
|
Client.getContactUIDForUser
|
train
|
def getContactUIDForUser(self):
"""Get the UID of the user associated with the authenticated user
"""
membership_tool = api.get_tool("portal_membership")
member = membership_tool.getAuthenticatedMember()
username = member.getUserName()
r = self.portal_catalog(
portal_type="Contact",
getUsername=username
)
if len(r) == 1:
return r[0].UID
|
python
|
{
"resource": ""
}
|
q21520
|
Client.getAnalysisCategories
|
train
|
def getAnalysisCategories(self):
"""Return all available analysis categories
"""
bsc = api.get_tool("bika_setup_catalog")
cats = []
for st in bsc(portal_type="AnalysisCategory",
is_active=True,
sort_on="sortable_title"):
cats.append((st.UID, st.Title))
return DisplayList(cats)
|
python
|
{
"resource": ""
}
|
q21521
|
Client.getContacts
|
train
|
def getContacts(self, only_active=True):
"""Return an array containing the contacts from this Client
"""
contacts = self.objectValues("Contact")
if only_active:
contacts = filter(api.is_active, contacts)
return contacts
|
python
|
{
"resource": ""
}
|
q21522
|
Client.getDecimalMark
|
train
|
def getDecimalMark(self):
"""Return the decimal mark to be used on reports for this client
If the client has DefaultDecimalMark selected, the Default value from
the LIMS Setup will be returned.
Otherwise, will return the value of DecimalMark.
"""
if self.getDefaultDecimalMark() is False:
return self.Schema()["DecimalMark"].get(self)
return self.bika_setup.getDecimalMark()
|
python
|
{
"resource": ""
}
|
q21523
|
Client.getCountry
|
train
|
def getCountry(self, default=None):
"""Return the Country from the Physical or Postal Address
"""
physical_address = self.getPhysicalAddress().get("country", default)
postal_address = self.getPostalAddress().get("country", default)
return physical_address or postal_address
|
python
|
{
"resource": ""
}
|
q21524
|
WorksheetImporter.get_rows
|
train
|
def get_rows(self, startrow=3, worksheet=None):
"""Returns a generator for all rows in a sheet.
Each row is a dictionary whose keys are the column headers taken
from the first row of the sheet.
The data values are returned in utf-8 format.
Starts to consume data from startrow
"""
headers = []
row_nr = 0
worksheet = worksheet if worksheet else self.worksheet
for row in worksheet.rows: # .iter_rows():
row_nr += 1
if row_nr == 1:
# headers = [cell.internal_value for cell in row]
headers = [cell.value for cell in row]
continue
if row_nr % 1000 == 0:
transaction.savepoint()
if row_nr <= startrow:
continue
# row = [_c(cell.internal_value).decode('utf-8') for cell in row]
new_row = []
for cell in row:
value = cell.value
if value is None:
value = ''
if isinstance(value, unicode):
value = value.encode('utf-8')
# Strip any space, \t, \n, or \r characters from the left-hand
# side, right-hand side, or both sides of the string
if isinstance(value, str):
value = value.strip(' \t\n\r')
new_row.append(value)
row = dict(zip(headers, new_row))
# parse out addresses
for add_type in ['Physical', 'Postal', 'Billing']:
row[add_type] = {}
if add_type + "_Address" in row:
for key in ['Address', 'City', 'State', 'District', 'Zip', 'Country']:
row[add_type][key] = str(row.get("%s_%s" % (add_type, key), ''))
yield row
|
python
|
{
"resource": ""
}
|
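A standalone sketch of the same header-zipping pattern with a recent openpyxl (the sheet content is invented for illustration):

from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws.append(["title", "Keyword"])   # header row
ws.append(["Calcium", "Ca"])      # data row

rows = ws.rows  # generator over cell tuples, as used by the importer above
headers = [cell.value for cell in next(rows)]
for row in rows:
    record = dict(zip(headers, [cell.value for cell in row]))
    assert record == {"title": "Calcium", "Keyword": "Ca"}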
q21525
|
WorksheetImporter.to_bool
|
train
|
def to_bool(self, value):
""" Converts a sheet string value to a boolean value.
Needed because of utf-8 conversions
"""
try:
value = value.lower()
except:
pass
try:
value = value.encode('utf-8')
except:
pass
try:
value = int(value)
except:
pass
if value in ('true', 1):
return True
else:
return False
|
python
|
{
"resource": ""
}
|
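Only the string 'true' (any casing) and the integer 1 (including numeric strings like '1') come back True; everything else, including 'yes', is False. A pure-Python restatement for illustration (the utf-8 encoding step is omitted):

def to_bool(value):
    try:
        value = value.lower()
    except AttributeError:
        pass
    try:
        value = int(value)
    except (TypeError, ValueError):
        pass
    return value in ('true', 1)

assert to_bool('TRUE') and to_bool('1') and to_bool(1)
assert not to_bool('yes') and not to_bool(0) and not to_bool(None)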
q21526
|
WorksheetImporter.get_object
|
train
|
def get_object(self, catalog, portal_type, title=None, **kwargs):
"""This will return an object from the catalog.
Logs a message and returns None if no object or multiple objects found.
All keyword arguments are passed verbatim to the contentFilter
"""
if not title and not kwargs:
return None
contentFilter = {"portal_type": portal_type}
if title:
contentFilter['title'] = to_unicode(title)
contentFilter.update(kwargs)
brains = catalog(contentFilter)
if len(brains) > 1:
logger.info("More than one object found for %s" % contentFilter)
return None
elif len(brains) == 0:
if portal_type == 'AnalysisService':
brains = catalog(portal_type=portal_type, getKeyword=title)
if brains:
return brains[0].getObject()
logger.info("No objects found for %s" % contentFilter)
return None
else:
return brains[0].getObject()
|
python
|
{
"resource": ""
}
|
q21527
|
Analysis_Services.get_relations
|
train
|
def get_relations(self, service_title, default_obj, obj_type, catalog_name, sheet_name, column):
""" Return an array of objects of the specified type in accordance to
the object titles defined in the sheet specified in 'sheet_name' and
service set in the paramenter 'service_title'.
If a default_obj is set, it will be included in the returned array.
"""
out_objects = [default_obj] if default_obj else []
cat = getToolByName(self.context, catalog_name)
worksheet = self.workbook.get_sheet_by_name(sheet_name)
if not worksheet:
return out_objects
for row in self.get_rows(3, worksheet=worksheet):
row_as_title = row.get('Service_title')
if not row_as_title:
return out_objects
elif row_as_title != service_title:
continue
obj = self.get_object(cat, obj_type, row.get(column))
if obj:
if default_obj and default_obj.UID() == obj.UID():
continue
out_objects.append(obj)
return out_objects
|
python
|
{
"resource": ""
}
|
q21528
|
fix_workflow_transitions
|
train
|
def fix_workflow_transitions(portal):
"""
Replace target states from some workflow statuses
"""
logger.info("Fixing workflow transitions...")
tochange = [
{'wfid': 'bika_duplicateanalysis_workflow',
'trid': 'submit',
'changes': {
'new_state_id': 'to_be_verified',
'guard_expr': ''
},
'update': {
'catalog': CATALOG_ANALYSIS_LISTING,
'portal_type': 'DuplicateAnalysis',
'status_from': 'attachment_due',
'status_to': 'to_be_verified'
}
}
]
wtool = api.get_tool('portal_workflow')
for item in tochange:
wfid = item['wfid']
trid = item['trid']
workflow = wtool.getWorkflowById(wfid)
transitions = workflow.transitions
transition = transitions[trid]
changes = item.get('changes', {})
if 'new_state_id' in changes:
new_state_id = changes['new_state_id']
oldstate = transition.new_state_id
logger.info(
"Replacing target state '{0}' from '{1}.{2}' to {3}"
.format(oldstate, wfid, trid, new_state_id)
)
transition.new_state_id = new_state_id
if 'guard_expr' in changes:
new_guard = changes['guard_expr']
if not new_guard:
transition.guard = None
logger.info(
"Removing guard expression from '{0}.{1}'"
.format(wfid, trid))
else:
guard = transition.getGuard()
guard.expr = Expression(new_guard)
transition.guard = guard
logger.info(
"Replacing guard expression from '{0}.{1}' to {2}"
.format(wfid, trid, new_guard))
update = item.get('update', {})
if update:
catalog_id = update['catalog']
portal_type = update['portal_type']
catalog = api.get_tool(catalog_id)
brains = catalog(portal_type=portal_type)
for brain in brains:
obj = api.get_object(brain)
if 'status_from' in update and 'status_to' in update:
status_from = update['status_from']
status_to = update['status_to']
if status_from == brain.review_state:
logger.info(
"Changing status for {0} from '{1} to {2}"
.format(obj.getId(), status_from, status_to))
changeWorkflowState(obj, wfid, status_to)
workflow.updateRoleMappingsFor(obj)
obj.reindexObject()
|
python
|
{
"resource": ""
}
|
q21529
|
GetSampleStickers.get_default_sticker_id
|
train
|
def get_default_sticker_id(self):
"""
Gets the default sticker for that content type depending on the
requested size.
:return: A sticker ID as string
"""
size = self.request.get('size', '')
if size == 'small':
return self.sample_type.getDefaultSmallSticker()
return self.sample_type.getDefaultLargeSticker()
|
python
|
{
"resource": ""
}
|
q21530
|
IdentifiersIndexer
|
train
|
def IdentifiersIndexer(instance):
"""Return a list of unique Identifier strings
This populates the Identifiers Keyword index, but with some
replacements to prevent the word-splitter etc from taking effect.
"""
identifiers = instance.Schema()['Identifiers'].get(instance)
return [safe_unicode(i['Identifier']) for i in identifiers]
|
python
|
{
"resource": ""
}
|
q21531
|
IHaveIdentifiersSchemaExtender.getOrder
|
train
|
def getOrder(self, schematas):
"""Return modified order of field schemats.
"""
schemata = self.context.schema['description'].schemata
fields = schematas[schemata]
fields.insert(fields.index('description') + 1,
'Identifiers')
return schematas
|
python
|
{
"resource": ""
}
|
q21532
|
AbstractAnalysis.getVerificators
|
train
|
def getVerificators(self):
"""Returns the user ids of the users that verified this analysis
"""
verifiers = list()
actions = ["verify", "multi_verify"]
for event in wf.getReviewHistory(self):
if event['action'] in actions:
verifiers.append(event['actor'])
verifiers.sort(reverse=True)
return verifiers
|
python
|
{
"resource": ""
}
|
q21533
|
AbstractAnalysis.getDefaultUncertainty
|
train
|
def getDefaultUncertainty(self, result=None):
"""Return the uncertainty value, if the result falls within
specified ranges for the service from which this analysis was derived.
"""
if result is None:
result = self.getResult()
uncertainties = self.getUncertainties()
if uncertainties:
try:
res = float(result)
except (TypeError, ValueError):
# if analysis result is not a number, then we assume in range
return None
for d in uncertainties:
_min = float(d['intercept_min'])
_max = float(d['intercept_max'])
if _min <= res and res <= _max:
if str(d['errorvalue']).strip().endswith('%'):
try:
percvalue = float(d['errorvalue'].replace('%', ''))
except ValueError:
return None
uncertainty = res / 100 * percvalue
else:
uncertainty = float(d['errorvalue'])
return uncertainty
return None
|
python
|
{
"resource": ""
}
|
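The uncertainty lookup walks a list of ranges and supports absolute or percentage error values. A self-contained sketch of that rule (the range data is invented for illustration):

uncertainties = [
    {"intercept_min": "0", "intercept_max": "10", "errorvalue": "0.5"},
    {"intercept_min": "10", "intercept_max": "100", "errorvalue": "5%"},
]

def default_uncertainty(result):
    res = float(result)
    for d in uncertainties:
        if float(d["intercept_min"]) <= res <= float(d["intercept_max"]):
            err = str(d["errorvalue"]).strip()
            if err.endswith("%"):
                return res / 100 * float(err[:-1])
            return float(err)
    return None

assert default_uncertainty("5.0") == 0.5   # absolute error value
assert default_uncertainty("50") == 2.5    # 5% of 50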
q21534
|
AbstractAnalysis.setUncertainty
|
train
|
def setUncertainty(self, unc):
"""Sets the uncertainty for this analysis. If the result is a
Detection Limit or the value is below the LDL or above the UDL, sets the
uncertainty value to None
"""
# Uncertainty calculation on DL
# https://jira.bikalabs.com/browse/LIMS-1808
if self.isAboveUpperDetectionLimit() or \
self.isBelowLowerDetectionLimit():
self.getField('Uncertainty').set(self, None)
else:
self.getField('Uncertainty').set(self, unc)
|
python
|
{
"resource": ""
}
|
q21535
|
AbstractAnalysis.isBelowLowerDetectionLimit
|
train
|
def isBelowLowerDetectionLimit(self):
"""Returns True if the result is below the Lower Detection Limit or
if Lower Detection Limit has been manually set
"""
if self.isLowerDetectionLimit():
return True
result = self.getResult()
if result and str(result).strip().startswith(LDL):
return True
if api.is_floatable(result):
return api.to_float(result) < self.getLowerDetectionLimit()
return False
|
python
|
{
"resource": ""
}
|
q21536
|
AbstractAnalysis.isAboveUpperDetectionLimit
|
train
|
def isAboveUpperDetectionLimit(self):
"""Returns True if the result is above the Upper Detection Limit or
if Upper Detection Limit has been manually set
"""
if self.isUpperDetectionLimit():
return True
result = self.getResult()
if result and str(result).strip().startswith(UDL):
return True
if api.is_floatable(result):
return api.to_float(result) > self.getUpperDetectionLimit()
return False
|
python
|
{
"resource": ""
}
|
q21537
|
AbstractAnalysis.getExponentialFormatPrecision
|
train
|
def getExponentialFormatPrecision(self, result=None):
""" Returns the precision for the Analysis Service and result
provided. Results with a precision value above this exponential
format precision should be formatted as scientific notation.
If the Calculate Precision according to Uncertainty is not set,
the method will return the exponential precision value set in the
Schema. Otherwise, will calculate the precision value according to
the Uncertainty and the result.
If Calculate Precision from the Uncertainty is set, but neither a
result nor uncertainty values are provided, returns the fixed
exponential precision.
Will return negative values if the result is below 1 and will return
0 or positive values if the result is 1 or above.
Given an analysis service with fixed exponential format
precision of 4:
Result Uncertainty Returns
5.234 0.22 0
13.5 1.34 1
0.0077 0.008 -3
32092 0.81 4
456021 423 5
For further details, visit https://jira.bikalabs.com/browse/LIMS-1334
:param result: if provided and "Calculate Precision according to the
Uncertainty" is set, the result will be used to retrieve the
uncertainty from which the precision must be calculated. Otherwise,
the fixed-precision will be used.
:returns: the precision
"""
if not result or self.getPrecisionFromUncertainty() is False:
return self._getExponentialFormatPrecision()
else:
uncertainty = self.getUncertainty(result)
if uncertainty is None:
return self._getExponentialFormatPrecision()
try:
float(result)
except ValueError:
# if analysis result is not a number, then we assume in range
return self._getExponentialFormatPrecision()
return get_significant_digits(uncertainty)
|
python
|
{
"resource": ""
}
|
q21538
|
AbstractAnalysis.getPrecision
|
train
|
def getPrecision(self, result=None):
"""Returns the precision for the Analysis.
- If ManualUncertainty is set, calculates the precision of the result
in accordance with the manual uncertainty set.
- If Calculate Precision from Uncertainty is set in Analysis Service,
calculates the precision in accordance with the uncertainty inferred
from the uncertainty ranges.
- If neither Manual Uncertainty nor Calculate Precision from
Uncertainty are set, returns the precision from the Analysis Service
- If you have a number with zero uncertainty: If you roll a pair of
dice and observe five spots, the number of spots is 5. This is a raw
data point, with no uncertainty whatsoever. So just write down the
number. Similarly, the number of centimeters per inch is 2.54,
by definition, with no uncertainty whatsoever. Again: just write
down the number.
Further information at AbstractBaseAnalysis.getPrecision()
"""
allow_manual = self.getAllowManualUncertainty()
precision_unc = self.getPrecisionFromUncertainty()
if allow_manual or precision_unc:
uncertainty = self.getUncertainty(result)
if uncertainty is None:
return self.getField('Precision').get(self)
if uncertainty == 0 and result is None:
return self.getField('Precision').get(self)
if uncertainty == 0:
strres = str(result)
numdecimals = strres[::-1].find('.')
return numdecimals
return get_significant_digits(uncertainty)
return self.getField('Precision').get(self)
|
python
|
{
"resource": ""
}
|
q21539
|
AbstractAnalysis.getAnalyst
|
train
|
def getAnalyst(self):
"""Returns the stored Analyst or the user who submitted the result
"""
analyst = self.getField("Analyst").get(self)
if not analyst:
analyst = self.getSubmittedBy()
return analyst or ""
|
python
|
{
"resource": ""
}
|
q21540
|
AbstractAnalysis.getWorksheet
|
train
|
def getWorksheet(self):
"""Returns the Worksheet to which this analysis belongs to, or None
"""
worksheet = self.getBackReferences('WorksheetAnalysis')
if not worksheet:
return None
if len(worksheet) > 1:
logger.error(
"Analysis %s is assigned to more than one worksheet."
% self.getId())
return worksheet[0]
|
python
|
{
"resource": ""
}
|
q21541
|
AbstractAnalysis.getAttachmentUIDs
|
train
|
def getAttachmentUIDs(self):
"""Used to populate metadata, so that we don't need full objects of
analyses when working with their attachments.
"""
attachments = self.getAttachment()
uids = [att.UID() for att in attachments]
return uids
|
python
|
{
"resource": ""
}
|
q21542
|
AbstractAnalysis.remove_duplicates
|
train
|
def remove_duplicates(self, ws):
"""When this analysis is unassigned from a worksheet, this function
is responsible for deleting DuplicateAnalysis objects from the ws.
"""
for analysis in ws.objectValues():
if IDuplicateAnalysis.providedBy(analysis) \
and analysis.getAnalysis().UID() == self.UID():
ws.removeAnalysis(analysis)
|
python
|
{
"resource": ""
}
|
q21543
|
AbstractAnalysis.getInterimValue
|
train
|
def getInterimValue(self, keyword):
"""Returns the value of an interim of this analysis
"""
interims = filter(lambda item: item["keyword"] == keyword,
self.getInterimFields())
if not interims:
logger.warning("Interim '{}' for analysis '{}' not found"
.format(keyword, self.getKeyword()))
return None
if len(interims) > 1:
logger.error("More than one interim '{}' found for '{}'"
.format(keyword, self.getKeyword()))
return None
return interims[0].get('value', '')
|
python
|
{
"resource": ""
}
|
q21544
|
checkUserAccess
|
train
|
def checkUserAccess(worksheet, request, redirect=True):
""" Checks if the current user has granted access to the worksheet.
If the user is an analyst without LabManager, LabClerk and
RegulatoryInspector roles and the option 'Allow analysts
only to access to the Worksheets on which they are assigned' is
ticked and the above condition is true, it will redirect to
the main Worksheets view.
Returns False if the user has no access, otherwise returns True
"""
# Deny access to foreign analysts
allowed = worksheet.checkUserAccess()
if not allowed and redirect:
msg = _('You do not have sufficient privileges to view '
'the worksheet ${worksheet_title}.',
mapping={"worksheet_title": worksheet.Title()})
worksheet.plone_utils.addPortalMessage(msg, 'warning')
# Redirect to WS list
portal = getToolByName(worksheet, 'portal_url').getPortalObject()
destination_url = portal.absolute_url() + "/worksheets"
request.response.redirect(destination_url)
return allowed
|
python
|
{
"resource": ""
}
|
q21545
|
showRejectionMessage
|
train
|
def showRejectionMessage(worksheet):
""" Adds a portalMessage if
a) the worksheet has been rejected and replaced by another or
b) if the worksheet is the replacement of a rejected worksheet.
Otherwise, does nothing.
"""
if hasattr(worksheet, 'replaced_by'):
uc = getToolByName(worksheet, 'uid_catalog')
uid = getattr(worksheet, 'replaced_by')
_ws = uc(UID=uid)[0].getObject()
msg = _("This worksheet has been rejected. The replacement worksheet is ${ws_id}",
mapping={'ws_id':_ws.getId()})
worksheet.plone_utils.addPortalMessage(msg)
if hasattr(worksheet, 'replaces_rejected_worksheet'):
uc = getToolByName(worksheet, 'uid_catalog')
uid = getattr(worksheet, 'replaces_rejected_worksheet')
_ws = uc(UID=uid)[0].getObject()
msg = _("This worksheet has been created to replace the rejected "
"worksheet at ${ws_id}",
mapping={'ws_id':_ws.getId()})
worksheet.plone_utils.addPortalMessage(msg)
|
python
|
{
"resource": ""
}
|
q21546
|
get_date
|
train
|
def get_date(context, value):
"""Tries to return a DateTime.DateTime object
"""
if not value:
return None
if isinstance(value, DateTime):
return value
if isinstance(value, datetime):
return dt2DT(value)
if not isinstance(value, basestring):
return None
def try_parse(date_string, format):
if not format:
return None
try:
struct_time = strptime(date_string, format)
return datetime(*struct_time[:6])
except ValueError:
pass
return None
def get_locale_format(key, context):
format = context.translate(key, domain="senaite.core", mapping={})
# TODO: Is this replacement below strictly necessary?
return format.replace(r"${", '%').replace('}', '')
# Try with prioritized formats
formats = [get_locale_format("date_format_long", context),
get_locale_format("date_format_short", context),
"%Y-%m-%d %H:%M", "%Y-%m-%d", "%Y-%m-%d %H:%M:%S"]
for pri_format in formats:
val = try_parse(value, pri_format)
if not val:
continue
val = dt2DT(val)
if val.timezoneNaive():
# Use local timezone for tz naive strings
# see http://dev.plone.org/plone/ticket/10141
zone = val.localZone(safelocaltime(val.timeTime()))
parts = val.parts()[:-1] + (zone,)
val = DateTime(*parts)
return val
logger.warn("Unable to convert {} to datetime".format(value))
return None
|
python
|
{
"resource": ""
}
|
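The fallback formats mean plain ISO-like strings parse without locale help. A minimal sketch of the try_parse core using time.strptime, standalone and without the Zope DateTime conversion:

from datetime import datetime
from time import strptime

def try_parse(date_string, fmt):
    try:
        # strptime yields a struct_time; its first six fields feed datetime
        return datetime(*strptime(date_string, fmt)[:6])
    except ValueError:
        return None

assert try_parse("2023-04-01 12:30", "%Y-%m-%d %H:%M") == \
    datetime(2023, 4, 1, 12, 30)
assert try_parse("01.04.2023", "%Y-%m-%d %H:%M") is None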
q21547
|
ulocalized_time
|
train
|
def ulocalized_time(time, long_format=None, time_only=None, context=None,
request=None):
"""
This function takes a string or a DateTime object and returns the
time formatted as a string
:param time: The time to process
:type time: str/DateTime
:param long_format: If True, return time in long format
:type long_format: boolean/null
:param time_only: If True, only returns time.
:type time_only: boolean/null
:param context: The current context
:type context: ATContentType
:param request: The current request
:type request: HTTPRequest object
:returns: The formatted date as string
:rtype: string
"""
# if time is a string, we'll try pass it through strptime with the various
# formats defined.
time = get_date(context, time)
if not time or not isinstance(time, DateTime):
return ''
# no printing times if they were not specified in inputs
if time.second() + time.minute() + time.hour() == 0:
long_format = False
try:
time_str = _ut(time, long_format, time_only, context, 'senaite.core', request)
except ValueError:
err_msg = traceback.format_exc() + '\n'
logger.warn(
err_msg + '\n' +
"Error converting '{}' time to string in {}."
.format(time, context))
time_str = ''
return time_str
|
python
|
{
"resource": ""
}
|
q21548
|
BrowserView.python_date_format
|
train
|
def python_date_format(self, long_format=None, time_only=False):
"""This convert bika domain date format msgstrs to Python
strftime format strings, by the same rules as ulocalized_time.
XXX i18nl10n.py may change, and that is where this code is taken from.
"""
# get msgid
msgid = long_format and 'date_format_long' or 'date_format_short'
if time_only:
msgid = 'time_format'
# get the formatstring
formatstring = translate(msgid, domain="senaite.core",
context=self.request)
if formatstring is None or formatstring.startswith(
'date_') or formatstring.startswith('time_'):
self.logger.error("bika/%s/%s could not be translated" %
(self.request.get('LANGUAGE'), msgid))
# msg catalog was not able to translate this msgids
# use default setting
properties = getToolByName(self.context,
'portal_properties').site_properties
if long_format:
format = properties.localLongTimeFormat
else:
if time_only:
format = properties.localTimeOnlyFormat
else:
format = properties.localTimeFormat
return format
return formatstring.replace(r"${", '%').replace('}', '')
|
python
|
{
"resource": ""
}
|
q21549
|
AbstractBaseAnalysis.getVATAmount
|
train
|
def getVATAmount(self):
"""Compute VAT Amount from the Price and system configured VAT
"""
price, vat = self.getPrice(), self.getVAT()
return float(price) * (float(vat) / 100)
|
python
|
{
"resource": ""
}
|
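For example, with a price of 100.00 and a system VAT of 14%, getVATAmount yields 14.00, and the related total-price helpers below add that amount on top:

price, vat = 100.0, 14.0
vat_amount = price * (vat / 100)           # getVATAmount
total_price = price + price * vat / 100    # getTotalPrice
assert vat_amount == 14.0 and total_price == 114.0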
q21550
|
AbstractBaseAnalysis.getDiscountedPrice
|
train
|
def getDiscountedPrice(self):
"""Compute discounted price excl. VAT
"""
price = self.getPrice()
price = price and price or 0
discount = self.bika_setup.getMemberDiscount()
discount = discount and discount or 0
return float(price) - (float(price) * float(discount)) / 100
|
python
|
{
"resource": ""
}
|
q21551
|
AbstractBaseAnalysis.getDiscountedBulkPrice
|
train
|
def getDiscountedBulkPrice(self):
"""Compute discounted bulk discount excl. VAT
"""
price = self.getBulkPrice()
price = price and price or 0
discount = self.bika_setup.getMemberDiscount()
discount = discount and discount or 0
return float(price) - (float(price) * float(discount)) / 100
|
python
|
{
"resource": ""
}
|
q21552
|
AbstractBaseAnalysis.getTotalPrice
|
train
|
def getTotalPrice(self):
"""Compute total price including VAT
"""
price = self.getPrice()
vat = self.getVAT()
price = price and price or 0
vat = vat and vat or 0
return float(price) + (float(price) * float(vat)) / 100
|
python
|
{
"resource": ""
}
|
q21553
|
AbstractBaseAnalysis.getTotalBulkPrice
|
train
|
def getTotalBulkPrice(self):
"""Compute total bulk price
"""
price = self.getBulkPrice()
vat = self.getVAT()
price = price and price or 0
vat = vat and vat or 0
return float(price) + (float(price) * float(vat)) / 100
|
python
|
{
"resource": ""
}
|
q21554
|
AbstractBaseAnalysis.getTotalDiscountedPrice
|
train
|
def getTotalDiscountedPrice(self):
"""Compute total discounted price
"""
price = self.getDiscountedPrice()
vat = self.getVAT()
price = price and price or 0
vat = vat and vat or 0
return float(price) + (float(price) * float(vat)) / 100
|
python
|
{
"resource": ""
}
|
q21555
|
AbstractBaseAnalysis.getTotalDiscountedBulkPrice
|
train
|
def getTotalDiscountedBulkPrice(self):
"""Compute total discounted corporate bulk price
"""
price = self.getDiscountedCorporatePrice()
vat = self.getVAT()
price = price and price or 0
vat = vat and vat or 0
return float(price) + (float(price) * float(vat)) / 100
|
python
|
{
"resource": ""
}
|
q21556
|
AbstractBaseAnalysis.getLowerDetectionLimit
|
train
|
def getLowerDetectionLimit(self):
"""Returns the Lower Detection Limit for this service as a floatable
"""
ldl = self.getField('LowerDetectionLimit').get(self)
try:
return float(ldl)
except ValueError:
return 0
|
python
|
{
"resource": ""
}
|
q21557
|
AbstractBaseAnalysis.getUpperDetectionLimit
|
train
|
def getUpperDetectionLimit(self):
"""Returns the Upper Detection Limit for this service as a floatable
"""
udl = self.getField('UpperDetectionLimit').get(self)
try:
return float(udl)
except ValueError:
return 0
|
python
|
{
"resource": ""
}
|
q21558
|
WorkflowActionSubmitAdapter.get_interims_data
|
train
|
def get_interims_data(self):
"""Returns a dictionary with the interims data
"""
form = self.request.form
if 'item_data' not in form:
return {}
item_data = {}
if isinstance(form['item_data'], list):
for i_d in form['item_data']:
for i, d in json.loads(i_d).items():
item_data[i] = d
return item_data
return json.loads(form['item_data'])
|
python
|
{
"resource": ""
}
|
q21559
|
get_calculation_dependants_for
|
train
|
def get_calculation_dependants_for(service):
"""Collect all services which depend on this service
:param service: Analysis Service Object/ZCatalog Brain
:returns: List of services that depend on this service
"""
def calc_dependants_gen(service, collector=None):
"""Generator for recursive resolution of dependant sevices.
"""
# The UID of the service
service_uid = api.get_uid(service)
# maintain an internal dependency mapping
if collector is None:
collector = {}
# Stop iteration if we processed this service already
if service_uid in collector:
raise StopIteration
# Get the dependant calculations of the service
# (calculations that use the service in their formula).
calc_uids = get_backreferences(
service, relationship="CalculationDependentServices")
for calc_uid in calc_uids:
# Get the calculation object
calc = api.get_object_by_uid(calc_uid)
# Get the Analysis Services which have this calculation assigned
dep_service_uids = get_backreferences(
calc, relationship='AnalysisServiceCalculation')
for dep_service_uid in dep_service_uids:
dep_service = api.get_object_by_uid(dep_service_uid)
# remember the dependent service
collector[dep_service_uid] = dep_service
# yield the dependent service
yield dep_service
# check the dependants of the dependant services
for ddep_service in calc_dependants_gen(
dep_service, collector=collector):
yield ddep_service
dependants = {}
service = api.get_object(service)
for dep_service in calc_dependants_gen(service):
# Skip the initial (requested) service
if dep_service == service:
continue
uid = api.get_uid(dep_service)
dependants[uid] = dep_service
return dependants
|
python
|
{
"resource": ""
}
|
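The dependants resolution is a recursive walk over back-references (service, to calculations that use it, to services assigned to those calculations) with a collector to stop cycles. A toy sketch over plain dicts; the graph shape is invented, whereas the real code resolves UIDs through the catalog:

# service -> services that directly depend on it (via some calculation)
graph = {"Ca": ["Hardness"], "Mg": ["Hardness"],
         "Hardness": ["Index"], "Index": []}

def dependants(service, collector=None):
    if collector is None:
        collector = set()
    for dep in graph[service]:
        if dep in collector:
            continue
        collector.add(dep)
        yield dep
        for ddep in dependants(dep, collector):
            yield ddep

assert sorted(dependants("Ca")) == ["Hardness", "Index"]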
q21560
|
get_service_dependencies_for
|
train
|
def get_service_dependencies_for(service):
"""Calculate the dependencies for the given service.
"""
dependants = get_calculation_dependants_for(service)
dependencies = get_calculation_dependencies_for(service)
return {
"dependencies": dependencies.values(),
"dependants": dependants.values(),
}
|
python
|
{
"resource": ""
}
|
q21561
|
InstrumentQCFailuresViewlet.get_failed_instruments
|
train
|
def get_failed_instruments(self):
"""Find invalid instruments
- instruments that have failed QC tests
- instruments whose certificate is out of date
- instruments that are disposed of until the next calibration test
Return a dictionary with all info about expired/invalid instruments
"""
bsc = api.get_tool("bika_setup_catalog")
insts = bsc(portal_type="Instrument", is_active=True)
for i in insts:
i = i.getObject()
instr = {
'uid': i.UID(),
'title': i.Title(),
}
if i.isValidationInProgress():
instr['link'] = '<a href="%s/validations">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['validation'].append(instr)
elif i.isCalibrationInProgress():
instr['link'] = '<a href="%s/calibrations">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['calibration'].append(instr)
elif i.isOutOfDate():
instr['link'] = '<a href="%s/certifications">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['out-of-date'].append(instr)
elif not i.isQCValid():
instr['link'] = '<a href="%s/referenceanalyses">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['qc-fail'].append(instr)
elif i.getDisposeUntilNextCalibrationTest():
instr['link'] = '<a href="%s/referenceanalyses">%s</a>' % (
i.absolute_url(), i.Title()
)
self.nr_failed += 1
self.failed['next-test'].append(instr)
|
python
|
{
"resource": ""
}
|
q21562
|
InstrumentQCFailuresViewlet.available
|
train
|
def available(self):
"""Control availability of the viewlet
"""
url = api.get_url(self.context)
# render on the portal root
if self.context == api.get_portal():
return True
# render on the front-page
if url.endswith("/front-page"):
return True
# render for manage_results
if url.endswith("/manage_results"):
return True
return False
|
python
|
{
"resource": ""
}
|
q21563
|
InstrumentQCFailuresViewlet.render
|
train
|
def render(self):
"""Render the viewlet
"""
if not self.available():
return ""
mtool = api.get_tool("portal_membership")
member = mtool.getAuthenticatedMember()
roles = member.getRoles()
allowed = "LabManager" in roles or "Manager" in roles
self.get_failed_instruments()
if allowed and self.nr_failed:
return self.index()
else:
return ""
|
python
|
{
"resource": ""
}
|
q21564
|
AuditLogView.get_widget_for
|
train
|
def get_widget_for(self, fieldname):
"""Lookup the widget
"""
field = self.context.getField(fieldname)
if not field:
return None
return field.widget
|
python
|
{
"resource": ""
}
|
q21565
|
AuditLogView.get_widget_label_for
|
train
|
def get_widget_label_for(self, fieldname, default=None):
"""Lookup the widget of the field and return the label
"""
widget = self.get_widget_for(fieldname)
if widget is None:
return default
return widget.label
|
python
|
{
"resource": ""
}
|
q21566
|
AuditLogView.translate_state
|
train
|
def translate_state(self, s):
"""Translate the given state string
"""
if not isinstance(s, basestring):
return s
s = s.capitalize().replace("_", " ")
return t(_(s))
|
python
|
{
"resource": ""
}
|
q21567
|
AuditLogView.folderitems
|
train
|
def folderitems(self):
"""Generate folderitems for each version
"""
items = []
# get the snapshots
snapshots = get_snapshots(self.context)
# reverse the order to get the most recent change first
snapshots = list(reversed(snapshots))
# set the total number of items
self.total = len(snapshots)
# slice a batch
batch = snapshots[self.limit_from:self.limit_from+self.pagesize]
for snapshot in batch:
item = self.make_empty_item(**snapshot)
# get the version of the snapshot
version = get_snapshot_version(self.context, snapshot)
# Version
item["version"] = version
# get the metadata of the diff
metadata = get_snapshot_metadata(snapshot)
# Modification Date
m_date = metadata.get("modified")
item["modified"] = self.to_localized_time(m_date)
# Actor
actor = metadata.get("actor")
item["actor"] = actor
# Fullname
properties = api.get_user_properties(actor)
item["fullname"] = properties.get("fullname", actor)
# Roles
roles = metadata.get("roles", [])
item["roles"] = ", ".join(roles)
# Remote Address
remote_address = metadata.get("remote_address")
item["remote_address"] = remote_address
# Action
action = metadata.get("action")
item["action"] = self.translate_state(action)
# Review State
review_state = metadata.get("review_state")
item["review_state"] = self.translate_state(review_state)
# get the previous snapshot
prev_snapshot = get_snapshot_by_version(self.context, version-1)
if prev_snapshot:
prev_metadata = get_snapshot_metadata(prev_snapshot)
prev_review_state = prev_metadata.get("review_state")
if prev_review_state != review_state:
item["replace"]["review_state"] = "{} → {}".format(
self.translate_state(prev_review_state),
self.translate_state(review_state))
# Rendered Diff
diff = compare_snapshots(snapshot, prev_snapshot)
item["diff"] = self.render_diff(diff)
# append the item
items.append(item)
return items
|
python
|
{
"resource": ""
}
|
q21568
|
InstrumentResultsFileParser._addRawResult
|
train
|
def _addRawResult(self, resid, values=None, override=False):
""" Adds a set of raw results for an object with id=resid
resid is usually an Analysis Request ID or Worksheet's Reference
Analysis ID. The values are a dictionary in which the keys are
analysis service keywords and the values, another dictionary with
the key,value results.
The column 'DefaultResult' must be provided, because it is used to map
to the column from which the default result must be retrieved.
Example:
resid = 'DU13162-001-R1'
values = {
'D2': {'DefaultResult': 'Final Conc',
'Remarks': '',
'Resp': '5816',
'ISTD Resp': '274638',
'Resp Ratio': '0.0212',
'Final Conc': '0.9145',
'Exp Conc': '1.9531',
'Accuracy': '98.19' },
'D3': {'DefaultResult': 'Final Conc',
'Remarks': '',
'Resp': '5816',
'ISTD Resp': '274638',
'Resp Ratio': '0.0212',
'Final Conc': '0.9145',
'Exp Conc': '1.9531',
'Accuracy': '98.19' }
}
"""
if values is None:
    values = {}
if override or resid not in self._rawresults.keys():
self._rawresults[resid] = [values]
else:
self._rawresults[resid].append(values)
|
python
|
{
"resource": ""
}
|
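With the default override=False, repeated calls for the same resid accumulate result dicts; override=True replaces what is already stored. A sketch of the resulting structure (values shortened from the docstring example):

rawresults = {}

def add_raw_result(resid, values, override=False):
    if override or resid not in rawresults:
        rawresults[resid] = [values]
    else:
        rawresults[resid].append(values)

add_raw_result("DU13162-001-R1", {"D2": {"DefaultResult": "Final Conc"}})
add_raw_result("DU13162-001-R1", {"D3": {"DefaultResult": "Final Conc"}})
assert len(rawresults["DU13162-001-R1"]) == 2
add_raw_result("DU13162-001-R1", {}, override=True)
assert rawresults["DU13162-001-R1"] == [{}]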
q21569
|
InstrumentResultsFileParser.getResultsTotalCount
|
train
|
def getResultsTotalCount(self):
""" The total number of analysis results parsed
"""
count = 0
for val in self.getRawResults().values():
count += len(val)
return count
|
python
|
{
"resource": ""
}
|
q21570
|
InstrumentResultsFileParser.getAnalysisKeywords
|
train
|
def getAnalysisKeywords(self):
""" The analysis service keywords found
"""
analyses = []
for rows in self.getRawResults().values():
for row in rows:
analyses = list(set(analyses + row.keys()))
return analyses
|
python
|
{
"resource": ""
}
|
q21571
|
InstrumentTXTResultsFileParser.read_file
|
train
|
def read_file(self, infile):
"""Given an input file read its contents, strip whitespace from the
beginning and end of each line and return a list of the preprocessed
lines read.
:param infile: file that contains the data to be read
:return: list of the read lines with stripped whitespace
"""
try:
encoding = self._encoding if self._encoding else None
mode = 'r' if self._encoding else 'rU'
with codecs.open(infile.name, mode, encoding=encoding) as f:
lines = f.readlines()
except AttributeError:
lines = infile.readlines()
lines = [line.strip() for line in lines]
return lines
|
python
|
{
"resource": ""
}
|
q21572
|
AnalysisResultsImporter.attach_attachment
|
train
|
def attach_attachment(self, analysis, attachment):
"""
Attach a file or a given set of files to an analysis
:param analysis: analysis where the files are to be attached
:param attachment: files to be attached. This can be either a
single file or a list of files
:return: None
"""
if not attachment:
return
if isinstance(attachment, list):
for attach in attachment:
self.attach_attachment(analysis, attach)
return
# current attachments
an_atts = analysis.getAttachment()
atts_filenames = [att.getAttachmentFile().filename for att in an_atts]
if attachment.getAttachmentFile().filename not in atts_filenames:
an_atts.append(attachment)
logger.info(
"Attaching %s to %s" % (attachment.UID(), analysis))
analysis.setAttachment([att.UID() for att in an_atts])
analysis.reindexObject()
else:
self.warn("Attachment %s was not linked to analysis %s" %
(attachment.UID(), analysis))
|
python
|
{
"resource": ""
}
|
q21573
|
format_numeric_result
|
train
|
def format_numeric_result(analysis, result, decimalmark='.', sciformat=1):
"""
Returns the formatted number part of a results value. This is
responsible for deciding the precision, and notation of numeric
values in accordance with the uncertainty. If a non-numeric
result value is given, the value will be returned unchanged.
The following rules apply:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the result will
be formatted in scientific notation.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 3.2092E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the result will be
formatted as decimal notation and the result will be rounded
in accordance with the precision (calculated from the uncertainty)
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 5.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above apply, but the
precision used for rounding the result is not calculated from
the uncertainty. The fixed length precision is used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result to be formatted.
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
Note: the result should be passed as a string to preserve its decimal precision.
:returns: the formatted result as string
"""
try:
result = float(result)
except ValueError:
return result
# continuing with 'nan' result will cause formatting to fail.
if math.isnan(result):
return result
# Scientific notation?
# Get the default precision for scientific notation
threshold = analysis.getExponentialFormatPrecision()
precision = analysis.getPrecision(result)
formatted = _format_decimal_or_sci(result, precision, threshold, sciformat)
return formatDecimalMark(formatted, decimalmark)
|
python
|
{
"resource": ""
}
|
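A plausible standalone sketch of the decimal-versus-scientific decision described above. _format_decimal_or_sci is not shown in this excerpt, so the formula here is an assumption reconstructed from the docstring examples:

import math

def format_decimal_or_sci(result, precision, threshold):
    # assumption: the digit count of the integer part decides the notation
    digits = int(math.floor(math.log10(abs(result)))) + 1 if result else 1
    if digits > threshold:
        return "%.*E" % (precision, result)
    return "%.*f" % (precision, result)

assert format_decimal_or_sci(32092, 4, 4) == "3.2092E+04"
assert format_decimal_or_sci(5.234, 1, 4) == "5.2"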
q21574
|
ReferenceResultsWidget._get_spec_value
|
train
|
def _get_spec_value(self, form, uid, key, default=''):
"""Returns the value assigned to the passed in key for the analysis
service uid from the passed in form.
If the obtained value is not floatable, the passed in default is
returned.
:param form: form being submitted
:param uid: uid of the Analysis Service the specification relates
:param key: id of the specs param to get (e.g. 'min')
:param default: fallback value that will be returned by default
:type default: str, None
"""
if not form or not uid:
return default
values = form.get(key, None)
if not values or len(values) == 0:
return default
value = values[0].get(uid, default)
return api.is_floatable(value) and value or default
|
python
|
{
"resource": ""
}
|
q21575
|
ReferenceResultsWidget.ReferenceResults
|
train
|
def ReferenceResults(self, field, allow_edit=False):
"""Render Reference Results Table
"""
instance = getattr(self, "instance", field.aq_parent)
table = api.get_view("table_reference_results",
context=instance,
request=self.REQUEST)
# Call listing hooks
table.update()
table.before_render()
return table.ajax_contents_table()
|
python
|
{
"resource": ""
}
|
q21576
|
RemarksField.set
|
train
|
def set(self, instance, value, **kwargs):
"""Adds the value to the existing text stored in the field,
along with a small divider showing username and date of this entry.
"""
if not value:
return
value = value.strip()
date = DateTime().rfc822()
user = getSecurityManager().getUser()
username = user.getUserName()
divider = "=== {} ({})".format(date, username)
existing_remarks = instance.getRawRemarks()
remarks = '\n'.join([divider, value, existing_remarks])
ObjectField.set(self, instance, remarks)
# reindex the object after save to update all catalog metadata
instance.reindexObject()
# notify object edited event
event.notify(ObjectEditedEvent(instance))
|
python
|
{
"resource": ""
}
|
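Each saved remark is therefore prepended to the history with a divider line carrying the RFC-822 date and user name. Shape of the stored text (dates and user names invented for illustration):

existing = "=== Mon, 01 Apr 2019 10:00:00 +0000 (analyst1)\nFirst remark"
divider = "=== {} ({})".format("Tue, 02 Apr 2019 09:30:00 +0000",
                               "labmanager")
remarks = "\n".join([divider, "Second remark", existing])
assert remarks.splitlines()[0].startswith("=== Tue, 02 Apr 2019")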
q21577
|
setup_handler
|
train
|
def setup_handler(context):
"""SENAITE setup handler
"""
if context.readDataFile("bika.lims_various.txt") is None:
return
logger.info("SENAITE setup handler [BEGIN]")
portal = context.getSite()
# Run Installers
remove_default_content(portal)
hide_navbar_items(portal)
reindex_content_structure(portal)
setup_groups(portal)
setup_catalog_mappings(portal)
setup_core_catalogs(portal)
# Setting up all LIMS catalogs defined in catalog folder
setup_catalogs(portal, getCatalogDefinitions())
# Run after all catalogs have been setup
setup_auditlog_catalog(portal)
logger.info("SENAITE setup handler [DONE]")
|
python
|
{
"resource": ""
}
|
q21578
|
remove_default_content
|
train
|
def remove_default_content(portal):
"""Remove default Plone contents
"""
logger.info("*** Delete Default Content ***")
# Get the list of object ids for portal
object_ids = portal.objectIds()
delete_ids = filter(lambda id: id in object_ids, CONTENTS_TO_DELETE)
portal.manage_delObjects(ids=delete_ids)
|
python
|
{
"resource": ""
}
|
q21579
|
hide_navbar_items
|
train
|
def hide_navbar_items(portal):
"""Hide root items in navigation
"""
logger.info("*** Hide Navigation Items ***")
# Get the list of object ids for portal
object_ids = portal.objectIds()
object_ids = filter(lambda id: id in object_ids, NAV_BAR_ITEMS_TO_HIDE)
for object_id in object_ids:
item = portal[object_id]
item.setExcludeFromNav(True)
item.reindexObject()
|
python
|
{
"resource": ""
}
|
q21580
|
reindex_content_structure
|
train
|
def reindex_content_structure(portal):
"""Reindex contents generated by Generic Setup
"""
logger.info("*** Reindex content structure ***")
def reindex(obj, recurse=False):
# skip catalog tools etc.
if api.is_object(obj):
obj.reindexObject()
if recurse and hasattr(aq_base(obj), "objectValues"):
map(reindex, obj.objectValues())
setup = api.get_setup()
setupitems = setup.objectValues()
rootitems = portal.objectValues()
for obj in itertools.chain(setupitems, rootitems):
logger.info("Reindexing {}".format(repr(obj)))
reindex(obj)
|
python
|
{
"resource": ""
}
|
q21581
|
setup_groups
|
train
|
def setup_groups(portal):
"""Setup roles and groups for BECHEM
"""
logger.info("*** Setup Roles and Groups ***")
portal_groups = api.get_tool("portal_groups")
for gdata in GROUPS:
group_id = gdata["id"]
# create the group and grant the roles
if group_id not in portal_groups.listGroupIds():
logger.info("+++ Adding group {title} ({id})".format(**gdata))
portal_groups.addGroup(group_id,
title=gdata["title"],
roles=gdata["roles"])
# grant the roles to the existing group
else:
ploneapi.group.grant_roles(
groupname=gdata["id"],
roles=gdata["roles"],)
logger.info("+++ Granted group {title} ({id}) the roles {roles}"
.format(**gdata))
|
python
|
{
"resource": ""
}
|
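Since setup_groups formats its log messages with **gdata and passes the values straight to portal_groups.addGroup, each GROUPS entry must be a dict with "id", "title" and "roles" keys. A hedged sketch of one plausible entry (the group name and roles are assumptions):

GROUPS = [{
    "id": "Analysts",
    "title": "Analysts",
    "roles": ["Member", "Analyst"],
}]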
q21582
|
setup_catalog_mappings
|
train
|
def setup_catalog_mappings(portal):
"""Setup portal_type -> catalog mappings
"""
logger.info("*** Setup Catalog Mappings ***")
at = api.get_tool("archetype_tool")
for portal_type, catalogs in CATALOG_MAPPINGS:
at.setCatalogsByType(portal_type, catalogs)
|
python
|
{
"resource": ""
}
|
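setCatalogsByType takes a portal_type and a list of catalog ids, so CATALOG_MAPPINGS must unpack into (portal_type, [catalog_ids]) pairs. A sketch with illustrative values, not the actual definitions:

# Assumed shape; the concrete type/catalog names are examples only
CATALOG_MAPPINGS = (
    ("Analysis", ["bika_analysis_catalog"]),
    ("Worksheet", ["bika_catalog_worksheet_listing", "portal_catalog"]),
)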
q21583
|
setup_core_catalogs
|
train
|
def setup_core_catalogs(portal):
"""Setup core catalogs
"""
logger.info("*** Setup Core Catalogs ***")
to_reindex = []
for catalog, name, attribute, meta_type in INDEXES:
c = api.get_tool(catalog)
indexes = c.indexes()
if name in indexes:
logger.info("*** Index '%s' already in Catalog [SKIP]" % name)
continue
logger.info("*** Adding Index '%s' for field '%s' to catalog ..."
% (meta_type, name))
# do we still need ZCTextIndexes?
if meta_type == "ZCTextIndex":
addZCTextIndex(c, name)
else:
c.addIndex(name, meta_type)
# get the new created index
index = c._catalog.getIndex(name)
# set the indexed attributes
if hasattr(index, "indexed_attrs"):
index.indexed_attrs = [attribute or name]
to_reindex.append((c, name))
logger.info("*** Added Index '%s' for field '%s' to catalog [DONE]"
% (meta_type, name))
# catalog columns
for catalog, name in COLUMNS:
c = api.get_tool(catalog)
if name not in c.schema():
logger.info("*** Adding Column '%s' to catalog '%s' ..."
% (name, catalog))
c.addColumn(name)
logger.info("*** Added Column '%s' to catalog '%s' [DONE]"
% (name, catalog))
        else:
            logger.info("*** Column '%s' already in catalog '%s' [SKIP]"
                        % (name, catalog))
for catalog, name in to_reindex:
logger.info("*** Indexing new index '%s' ..." % name)
catalog.manage_reindexIndex(name)
logger.info("*** Indexing new index '%s' [DONE]" % name)
|
python
|
{
"resource": ""
}
|
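The two loops above fix the shape of the constants they consume: INDEXES is a sequence of 4-tuples and COLUMNS a sequence of 2-tuples. A hedged sketch (entries are illustrative):

INDEXES = [
    # catalog id, index name, indexed attribute ("" reuses the name), meta type
    ("portal_catalog", "getClientUID", "", "FieldIndex"),
    ("portal_catalog", "listing_searchable_text", "", "ZCTextIndex"),
]
COLUMNS = [
    # catalog id, column name
    ("portal_catalog", "getClientUID"),
]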
q21584
|
setup_auditlog_catalog
|
train
|
def setup_auditlog_catalog(portal):
"""Setup auditlog catalog
"""
logger.info("*** Setup Audit Log Catalog ***")
catalog_id = auditlog_catalog.CATALOG_AUDITLOG
catalog = api.get_tool(catalog_id)
for name, meta_type in auditlog_catalog._indexes.iteritems():
indexes = catalog.indexes()
if name in indexes:
logger.info("*** Index '%s' already in Catalog [SKIP]" % name)
continue
logger.info("*** Adding Index '%s' for field '%s' to catalog ..."
% (meta_type, name))
catalog.addIndex(name, meta_type)
# Setup TextIndexNG3 for listings
# XXX is there another way to do this?
if meta_type == "TextIndexNG3":
index = catalog._catalog.getIndex(name)
index.index.default_encoding = "utf-8"
index.index.query_parser = "txng.parsers.en"
index.index.autoexpand = "always"
index.index.autoexpand_limit = 3
logger.info("*** Added Index '%s' for field '%s' to catalog [DONE]"
% (meta_type, name))
# Attach the catalog to all known portal types
at = api.get_tool("archetype_tool")
pt = api.get_tool("portal_types")
for portal_type in pt.listContentTypes():
catalogs = at.getCatalogsByType(portal_type)
if catalog not in catalogs:
new_catalogs = map(lambda c: c.getId(), catalogs) + [catalog_id]
at.setCatalogsByType(portal_type, new_catalogs)
logger.info("*** Adding catalog '{}' for '{}'".format(
catalog_id, portal_type))
|
python
|
{
"resource": ""
}
|
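Because the first loop unpacks name/meta_type pairs via auditlog_catalog._indexes.iteritems(), that attribute must be a mapping from index name to index meta type. A sketch of the assumed shape (the entries are illustrative):

# Assumed shape of auditlog_catalog._indexes
_indexes = {
    "actor": "FieldIndex",
    "action": "FieldIndex",
    "listing_searchable_text": "TextIndexNG3",
}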
q21585
|
_createWorksheet
|
train
|
def _createWorksheet(base, worksheettemplate, analyst):
"""
    This function creates a new worksheet taking advantage of the analyst
    variable. If no analyst is defined, the system will pick up the first
    one obtained in a query.
"""
    if not analyst:
        # Get any analyst; the DisplayList returned by getUsers typically
        # has an empty item first, so pick the first real user
        analyst = getUsers(base, ['Manager', 'LabManager', 'Analyst'])[1]
folder = base.bika_setup.worksheets
_id = folder.invokeFactory('Worksheet', id=tmpID())
ws = folder[_id]
ws.unmarkCreationFlag()
new_ws_id = renameAfterCreation(ws)
ws.edit(
Number=new_ws_id,
Analyst=analyst,
)
if worksheettemplate:
ws.applyWorksheetTemplate(worksheettemplate)
return ws
|
python
|
{
"resource": ""
}
|
q21586
|
doWorksheetLogic
|
train
|
def doWorksheetLogic(base, action, analysis):
"""
    This function checks if the action defines worksheet handling.
    There is a selection list in each action section. This select has the
    following options and consequences:
1) "To the current worksheet" (selected by default)
2) "To another worksheet"
3) "Create another worksheet"
4) "No worksheet"
- If option 1) is selected, the Analyst selection list will not be
displayed. Since the action doesn't require to add the new analysis to
another worksheet, the function will try to add the analysis to the same
worksheet as the base analysis. If the base analysis is not assigned in a
worksheet, no worksheet will be assigned to the new analysis.
- If option 2) is selected, the Analyst selection list will be displayed.
- If option 2) is selected and an analyst has also been selected, then the
system will search for the latest worksheet in status "open" for the
selected analyst and will add the analysis in that worksheet (the system
also searches for the worksheet template if defined).
    If the system doesn't find any match, a new worksheet assigned to the
    selected analyst is automatically created and the analysis added to it.
- If option 2) is selected but no analyst selected, then the system will
search for the latest worksheet in the status "open" regardless of the
analyst assigned and will add the analysis in that worksheet. If there
isn't any open worksheet available, then go to option 3)
- If option 3) is selected, a new worksheet with the defined analyst will
be created.
If no analyst is defined for the original analysis, the system
will create a new worksheet and assign the same analyst as the original
analysis to which the rule applies.
If the original analysis doesn't have assigned any analyst, the system will
assign the same analyst that was assigned to the latest worksheet available
in the system. If there isn't any worksheet created yet, use the first
active user with role "analyst" available.
    - If option 4) is selected, the Analyst selection list will not be
    displayed. The analysis (duplicate, repeat, etc.) will be created, but
    not assigned to any worksheet, so it will stay "on queue", assigned to
    the same Analysis Request as the original analysis for which the rule
    was triggered.
"""
otherWS = action.get('otherWS', False)
worksheet_catalog = getToolByName(base, CATALOG_WORKSHEET_LISTING)
if otherWS in ['to_another', 'create_another']:
        # The new analysis must be added to another worksheet, either an
        # existing one or a newly created one.
        # Checking if the action defines an analyst
new_analyst = action.get('analyst', '')
# Checking if the action defines a worksheet template
worksheettemplate = action.get('worksheettemplate', '')
# Creating the query
contentFilter = {
'review_state': 'open',
'sort_on': 'created',
'sort_order': 'reverse'}
# If a new analyst is defined, add the analysis to the first
# analyst's worksheet
if new_analyst:
# Getting the last worksheet created for the analyst
contentFilter['Analyst'] = new_analyst
if worksheettemplate:
# Adding the worksheettemplate filter
contentFilter['getWorksheetTemplateUID'] = worksheettemplate
# Run the filter
wss = worksheet_catalog(contentFilter)
        # 'repeat' actions take advantage of the 'retract' workflow action.
        # The retract process assigns the new analysis to the same worksheet
        # as the base analysis, so we need to unassign it now.
ws = analysis.getWorksheet()
if ws:
ws.removeAnalysis(analysis)
# If worksheet found and option 2
if len(wss) > 0 and otherWS == 'to_another':
# Add the new analysis to the worksheet
wss[0].getObject().addAnalysis(analysis)
# No worksheet found, but option 2 or 3 selected:
elif new_analyst:
# Create a new worksheet and add the analysis to it
ws = _createWorksheet(base, worksheettemplate, new_analyst)
ws.addAnalysis(analysis)
elif not new_analyst:
# Getting the original analysis to which the rule applies
previous_analysis = analysis.getReflexAnalysisOf()
# Getting the worksheet of the analysis
prev_ws = previous_analysis.getWorksheet()
# Getting the analyst from the worksheet
prev_analyst = prev_ws.getAnalyst() if prev_ws else ''
# If the previous analysis belongs to a worksheet:
if prev_analyst:
ws = _createWorksheet(base, worksheettemplate, prev_analyst)
ws.addAnalysis(analysis)
# If the original analysis doesn't have assigned any analyst
else:
# assign the same analyst that was assigned to the latest
# worksheet available
prev_analyst = wss[0].getObject().getAnalyst() if wss else ''
if prev_analyst:
ws = _createWorksheet(base, worksheettemplate, prev_analyst)
ws.addAnalysis(analysis)
elif otherWS == 'current':
# Getting the base's worksheet
ws = base.getWorksheet()
if ws:
# If the old analysis belongs to a worksheet, add the new
# one to it
ws.addAnalysis(analysis)
# If option 1 selected and no ws found, no worksheet will be assigned to
# the analysis.
# If option 4 selected, no worksheet will be assigned to the analysis
elif otherWS == 'no_ws':
ws = analysis.getWorksheet()
if ws:
ws.removeAnalysis(analysis)
|
python
|
{
"resource": ""
}
|
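The otherWS values handled above map onto the four docstring options: 'current' (1), 'to_another' (2), 'create_another' (3) and 'no_ws' (4). A hedged sketch of an action dict for option 2, using the keys the function actually reads (the analyst id and template UID are placeholders):

action = {
    "otherWS": "to_another",         # option 2
    "analyst": "analyst1",           # optional: target analyst's member id
    "worksheettemplate": "wst-uid",  # optional: WorksheetTemplate UID
}
# doWorksheetLogic(base_analysis, action, new_analysis)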
q21587
|
ARImport.workflow_before_validate
|
train
|
def workflow_before_validate(self):
"""This function transposes values from the provided file into the
ARImport object's fields, and checks for invalid values.
If errors are found:
- Validation transition is aborted.
- Errors are stored on object and displayed to user.
"""
# Re-set the errors on this ARImport each time validation is attempted.
# When errors are detected they are immediately appended to this field.
self.setErrors([])
self.validate_headers()
self.validate_samples()
    if self.getErrors():
        addStatusMessage(self.REQUEST, _p('Validation errors.'), 'error')
        transaction.commit()
        # Redirect back to the edit form, so the errors can be fixed
        self.REQUEST.response.write(
            '<script>document.location.href="%s/edit"</script>' % (
                self.absolute_url()))
    else:
        self.REQUEST.response.write(
            '<script>document.location.href="%s/view"</script>' % (
                self.absolute_url()))
|
python
|
{
"resource": ""
}
|
q21588
|
ARImport.workflow_script_import
|
train
|
def workflow_script_import(self):
"""Create objects from valid ARImport
"""
bsc = getToolByName(self, 'bika_setup_catalog')
client = self.aq_parent
title = _('Submitting Sample Import')
description = _('Creating and initialising objects')
bar = ProgressBar(self, self.REQUEST, title, description)
notify(InitialiseProgressBar(bar))
profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]
gridrows = self.schema['SampleData'].get(self)
row_cnt = 0
for therow in gridrows:
row = deepcopy(therow)
row_cnt += 1
        # Profiles are titles, profile keys, or UIDs: convert them to UIDs.
newprofiles = []
for title in row['Profiles']:
objects = [x for x in profiles
if title in (x.getProfileKey(), x.UID(), x.Title())]
for obj in objects:
newprofiles.append(obj.UID())
row['Profiles'] = newprofiles
# Same for analyses
newanalyses = set(self.get_row_services(row) +
self.get_row_profile_services(row))
# get batch
batch = self.schema['Batch'].get(self)
if batch:
row['Batch'] = batch.UID()
# Add AR fields from schema into this row's data
row['ClientReference'] = self.getClientReference()
row['ClientOrderNumber'] = self.getClientOrderNumber()
contact_uid =\
self.getContact().UID() if self.getContact() else None
row['Contact'] = contact_uid
# Creating analysis request from gathered data
ar = create_analysisrequest(
client,
self.REQUEST,
row,
analyses=list(newanalyses),)
# progress marker update
progress_index = float(row_cnt) / len(gridrows) * 100
progress = ProgressState(self.REQUEST, progress_index)
notify(UpdateProgressEvent(progress))
# document has been written to, and redirect() fails here
self.REQUEST.response.write(
'<script>document.location.href="%s"</script>' % (
self.absolute_url()))
|
python
|
{
"resource": ""
}
|
q21589
|
ARImport.get_header_values
|
train
|
def get_header_values(self):
"""Scrape the "Header" values from the original input file
"""
lines = self.getOriginalFile().data.splitlines()
reader = csv.reader(lines)
header_fields = header_data = []
for row in reader:
if not any(row):
continue
if row[0].strip().lower() == 'header':
header_fields = [x.strip() for x in row][1:]
continue
if row[0].strip().lower() == 'header data':
header_data = [x.strip() for x in row][1:]
break
if not (header_data or header_fields):
return None
if not (header_data and header_fields):
self.error("File is missing header row or header data")
return None
    # zip the recognised header fields and data into a dict
values = dict(zip(header_fields, header_data))
# blank cell from sheet will probably make it in here:
if '' in values:
del (values[''])
return values
|
python
|
{
"resource": ""
}
|
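The parsing contract of get_header_values is easiest to see on a toy file: the row whose first cell is 'Header' supplies the keys, the row starting with 'Header Data' supplies the values, and the two are zipped into a dict. A self-contained sketch with made-up field names:

import csv

lines = [
    "Header,File name,No of Samples,Client name",
    "Header Data,import-1.csv,2,Happy Hills",
]
rows = list(csv.reader(lines))
header_fields = [x.strip() for x in rows[0]][1:]
header_data = [x.strip() for x in rows[1]][1:]
print dict(zip(header_fields, header_data))
# {'File name': 'import-1.csv', 'No of Samples': '2', 'Client name': 'Happy Hills'}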
q21590
|
ARImport.save_header_data
|
train
|
def save_header_data(self):
"""Save values from the file's header row into their schema fields.
"""
client = self.aq_parent
headers = self.get_header_values()
if not headers:
return False
# Plain header fields that can be set into plain schema fields:
for h, f in [
('File name', 'Filename'),
('No of Samples', 'NrSamples'),
('Client name', 'ClientName'),
('Client ID', 'ClientID'),
('Client Order Number', 'ClientOrderNumber'),
('Client Reference', 'ClientReference')
]:
v = headers.get(h, None)
if v:
field = self.schema[f]
field.set(self, v)
del (headers[h])
# Primary Contact
v = headers.get('Contact', None)
contacts = [x for x in client.objectValues('Contact')]
contact = [c for c in contacts if c.Title() == v]
if contact:
self.schema['Contact'].set(self, contact)
else:
self.error("Specified contact '%s' does not exist; using '%s'"%
(v, contacts[0].Title()))
self.schema['Contact'].set(self, contacts[0])
del (headers['Contact'])
# CCContacts
field_value = {
'CCNamesReport': '',
'CCEmailsReport': '',
'CCNamesInvoice': '',
'CCEmailsInvoice': ''
}
for h, f in [
# csv header name DataGrid Column ID
('CC Names - Report', 'CCNamesReport'),
('CC Emails - Report', 'CCEmailsReport'),
('CC Names - Invoice', 'CCNamesInvoice'),
('CC Emails - Invoice', 'CCEmailsInvoice'),
]:
if h in headers:
values = [x.strip() for x in headers.get(h, '').split(",")]
field_value[f] = values if values else ''
del (headers[h])
self.schema['CCContacts'].set(self, [field_value])
if headers:
unexpected = ','.join(headers.keys())
self.error("Unexpected header fields: %s" % unexpected)
|
python
|
{
"resource": ""
}
|
q21591
|
ARImport.get_sample_values
|
train
|
def get_sample_values(self):
"""Read the rows specifying Samples and return a dictionary with
related data.
keys are:
headers - row with "Samples" in column 0. These headers are
used as dictionary keys in the rows below.
prices - Row with "Analysis Price" in column 0.
       total_analyses - Row with "Total analyses" in column 0
price_totals - Row with "Total price excl Tax" in column 0
samples - All other sample rows.
"""
res = {'samples': []}
lines = self.getOriginalFile().data.splitlines()
reader = csv.reader(lines)
next_rows_are_sample_rows = False
for row in reader:
if not any(row):
continue
if next_rows_are_sample_rows:
vals = [x.strip() for x in row]
if not any(vals):
continue
res['samples'].append(zip(res['headers'], vals))
elif row[0].strip().lower() == 'samples':
res['headers'] = [x.strip() for x in row]
elif row[0].strip().lower() == 'analysis price':
res['prices'] = \
zip(res['headers'], [x.strip() for x in row])
elif row[0].strip().lower() == 'total analyses':
res['total_analyses'] = \
zip(res['headers'], [x.strip() for x in row])
elif row[0].strip().lower() == 'total price excl tax':
res['price_totals'] = \
zip(res['headers'], [x.strip() for x in row])
next_rows_are_sample_rows = True
return res
|
python
|
{
"resource": ""
}
|
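Note the order implied by the parser above: sample rows are only collected after the 'Total price excl Tax' row flips next_rows_are_sample_rows. A hedged sketch of the expected section layout (column names invented for the example):

lines = [
    "Samples,Sample ID,Sampling Date,Analyses",  # headers
    "Analysis Price,,,12.50",                    # prices
    "Total analyses,,,3",                        # total_analyses
    "Total price excl Tax,,,37.50",              # price_totals; sample rows follow
    ",S-001,2019-06-01,Au",                      # collected into res['samples']
    ",S-002,2019-06-02,Cu",
]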
q21592
|
ARImport.get_batch_header_values
|
train
|
def get_batch_header_values(self):
"""Scrape the "Batch Header" values from the original input file
"""
lines = self.getOriginalFile().data.splitlines()
reader = csv.reader(lines)
batch_headers = batch_data = []
for row in reader:
if not any(row):
continue
if row[0].strip().lower() == 'batch header':
batch_headers = [x.strip() for x in row][1:]
continue
if row[0].strip().lower() == 'batch data':
batch_data = [x.strip() for x in row][1:]
break
if not (batch_data or batch_headers):
return None
if not (batch_data and batch_headers):
self.error("Missing batch headers or data")
return None
    # zip the batch headers and data into a dict
values = dict(zip(batch_headers, batch_data))
return values
|
python
|
{
"resource": ""
}
|
q21593
|
ARImport.create_or_reference_batch
|
train
|
def create_or_reference_batch(self):
"""Save reference to batch, if existing batch specified
Create new batch, if possible with specified values
"""
client = self.aq_parent
batch_headers = self.get_batch_header_values()
if not batch_headers:
return False
# if the Batch's Title is specified and exists, no further
# action is required. We will just set the Batch field to
# use the existing object.
batch_title = batch_headers.get('title', False)
if batch_title:
existing_batch = [x for x in client.objectValues('Batch')
if x.title == batch_title]
if existing_batch:
self.setBatch(existing_batch[0])
return existing_batch[0]
# If the batch title is specified but does not exist,
    # we will attempt to create the batch now.
if 'title' in batch_headers:
if 'id' in batch_headers:
del (batch_headers['id'])
if '' in batch_headers:
del (batch_headers[''])
batch = _createObjectByType('Batch', client, tmpID())
batch.processForm()
batch.edit(**batch_headers)
self.setBatch(batch)
|
python
|
{
"resource": ""
}
|
q21594
|
ARImport.validate_headers
|
train
|
def validate_headers(self):
"""Validate headers fields from schema
"""
pc = getToolByName(self, 'portal_catalog')
pu = getToolByName(self, "plone_utils")
client = self.aq_parent
# Verify Client Name
if self.getClientName() != client.Title():
self.error("%s: value is invalid (%s)." % (
'Client name', self.getClientName()))
# Verify Client ID
if self.getClientID() != client.getClientID():
self.error("%s: value is invalid (%s)." % (
'Client ID', self.getClientID()))
existing_arimports = pc(portal_type='ARImport',
review_state=['valid', 'imported'])
# Verify Client Order Number
for arimport in existing_arimports:
if arimport.UID == self.UID() \
or not arimport.getClientOrderNumber():
continue
arimport = arimport.getObject()
if arimport.getClientOrderNumber() == self.getClientOrderNumber():
self.error('%s: already used by existing ARImport.' %
'ClientOrderNumber')
break
# Verify Client Reference
for arimport in existing_arimports:
if arimport.UID == self.UID() \
or not arimport.getClientReference():
continue
arimport = arimport.getObject()
if arimport.getClientReference() == self.getClientReference():
self.error('%s: already used by existing ARImport.' %
'ClientReference')
break
# getCCContacts has no value if object is not complete (eg during test)
if self.getCCContacts():
cc_contacts = self.getCCContacts()[0]
contacts = [x for x in client.objectValues('Contact')]
contact_names = [c.Title() for c in contacts]
# validate Contact existence in this Client
for k in ['CCNamesReport', 'CCNamesInvoice']:
for val in cc_contacts[k]:
if val and val not in contact_names:
self.error('%s: value is invalid (%s)' % (k, val))
else:
cc_contacts = {'CCNamesReport': [],
'CCEmailsReport': [],
'CCNamesInvoice': [],
'CCEmailsInvoice': []
}
# validate Contact existence in this Client
for k in ['CCEmailsReport', 'CCEmailsInvoice']:
for val in cc_contacts.get(k, []):
if val and not pu.validateSingleNormalizedEmailAddress(val):
self.error('%s: value is invalid (%s)' % (k, val))
|
python
|
{
"resource": ""
}
|
q21595
|
ARImport.validate_samples
|
train
|
def validate_samples(self):
"""Scan through the SampleData values and make sure
that each one is correct
"""
bsc = getToolByName(self, 'bika_setup_catalog')
keywords = bsc.uniqueValuesFor('getKeyword')
profiles = []
for p in bsc(portal_type='AnalysisProfile'):
p = p.getObject()
profiles.append(p.Title())
profiles.append(p.getProfileKey())
row_nr = 0
for gridrow in self.getSampleData():
row_nr += 1
# validate against sample and ar schemas
for k, v in gridrow.items():
            if k in ['Analyses', 'Profiles']:
                continue
if k in sample_schema:
try:
self.validate_against_schema(
sample_schema, row_nr, k, v)
continue
except ValueError as e:
self.error(e.message)
break
if k in ar_schema:
try:
self.validate_against_schema(
ar_schema, row_nr, k, v)
except ValueError as e:
self.error(e.message)
an_cnt = 0
for v in gridrow['Analyses']:
if v and v not in keywords:
self.error("Row %s: value is invalid (%s=%s)" %
('Analysis keyword', row_nr, v))
else:
an_cnt += 1
for v in gridrow['Profiles']:
if v and v not in profiles:
self.error("Row %s: value is invalid (%s=%s)" %
('Profile Title', row_nr, v))
else:
an_cnt += 1
if not an_cnt:
self.error("Row %s: No valid analyses or profiles" % row_nr)
|
python
|
{
"resource": ""
}
|
q21596
|
ARImport.get_row_services
|
train
|
def get_row_services(self, row):
"""Return a list of services which are referenced in Analyses.
values may be UID, Title or Keyword.
"""
bsc = getToolByName(self, 'bika_setup_catalog')
services = set()
for val in row.get('Analyses', []):
brains = bsc(portal_type='AnalysisService', getKeyword=val)
if not brains:
brains = bsc(portal_type='AnalysisService', title=val)
if not brains:
brains = bsc(portal_type='AnalysisService', UID=val)
if brains:
services.add(brains[0].UID)
else:
self.error("Invalid analysis specified: %s" % val)
return list(services)
|
python
|
{
"resource": ""
}
|
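A hedged usage sketch for get_row_services: values in the row's 'Analyses' list may be keywords, titles or UIDs, and each is resolved against the setup catalog in that order (the names below are placeholders; the call needs a real portal context):

# Hypothetical row: "Au" as a service keyword, "Copper" as a service title
row = {"Analyses": ["Au", "Copper"]}
# uids = arimport.get_row_services(row)
# -> list of AnalysisService UIDs, one per resolvable value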
q21597
|
ARImport.get_row_profile_services
|
train
|
def get_row_profile_services(self, row):
"""Return a list of services which are referenced in profiles
values may be UID, Title or ProfileKey.
"""
bsc = getToolByName(self, 'bika_setup_catalog')
services = set()
profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]
for val in row.get('Profiles', []):
objects = [x for x in profiles
if val in (x.getProfileKey(), x.UID(), x.Title())]
if objects:
for service in objects[0].getService():
services.add(service.UID())
else:
self.error("Invalid profile specified: %s" % val)
return list(services)
|
python
|
{
"resource": ""
}
|
q21598
|
ARImport.get_row_container
|
train
|
def get_row_container(self, row):
"""Return a sample container
"""
bsc = getToolByName(self, 'bika_setup_catalog')
val = row.get('Container', False)
if val:
brains = bsc(portal_type='Container', UID=row['Container'])
if brains:
            return brains[0].getObject()
brains = bsc(portal_type='ContainerType', UID=row['Container'])
if brains:
# XXX Cheating. The calculation of capacity vs. volume is not done.
return brains[0].getObject()
return None
|
python
|
{
"resource": ""
}
|
q21599
|
t
|
train
|
def t(i18n_msg):
"""Safely translate and convert to UTF8, any zope i18n msgid returned from
a bikaMessageFactory _
"""
text = to_unicode(i18n_msg)
try:
request = api.get_request()
domain = getattr(i18n_msg, "domain", "senaite.core")
text = translate(text, domain=domain, context=request)
except UnicodeDecodeError:
# TODO: This is only a quick fix
logger.warn("{} couldn't be translated".format(text))
return to_utf8(text)
|
python
|
{
"resource": ""
}
|
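A hedged usage sketch for t (assumes a bikaMessageFactory-style _ in scope and a live Zope request; the msgid is an example):

from bika.lims import bikaMessageFactory as _

msg = _(u"Add Analysis")
print t(msg)  # UTF-8 translation, or the msgid itself if none exists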