Columns: sentence1 (string, lengths 52 – 3.87M), sentence2 (string, lengths 1 – 47.2k), label (stringclasses, 1 value: entailment)
def explicify_hydrogens(self):
    """
    add explicit hydrogens to atoms

    :return: number of added atoms
    """
    tmp = []
    for n, atom in self.atoms():
        if atom.element != 'H':
            for _ in range(atom.get_implicit_h([x.order for x in self._adj[n].values()])):
                tmp.append(n)
    for n in tmp:
        self.add_bond(n, self.add_atom(H), Bond())
    self.flush_cache()
    return len(tmp)
add explicit hydrogens to atoms :return: number of added atoms
entailment
def substructure(self, atoms, meta=False, as_view=True):
    """
    create substructure containing atoms from nbunch list

    :param atoms: list of atoms numbers of substructure
    :param meta: if True metadata will be copied to substructure
    :param as_view: If True, the returned graph-view provides a read-only view of
        the original structure scaffold without actually copying any data.
    """
    s = super().substructure(atoms, meta, as_view)
    if as_view:
        s.check_valence = s.explicify_hydrogens = s.implicify_hydrogens = s.reset_query_marks = frozen
        s.standardize = s.aromatize = frozen
    return s
create substructure containing atoms from nbunch list :param atoms: list of atoms numbers of substructure :param meta: if True metadata will be copied to substructure :param as_view: If True, the returned graph-view provides a read-only view of the original structure scaffold without actually copying any data.
entailment
def check_valence(self):
    """
    check valences of all atoms

    :return: list of invalid atoms
    """
    return [x for x, atom in self.atoms() if not atom.check_valence(self.environment(x))]
check valences of all atoms :return: list of invalid atoms
entailment
def _matcher(self, other):
    """
    return VF2 GraphMatcher

    MoleculeContainer < MoleculeContainer
    MoleculeContainer < CGRContainer
    """
    if isinstance(other, (self._get_subclass('CGRContainer'), MoleculeContainer)):
        return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y)
    # error message fixed to name the pairs this matcher actually accepts
    raise TypeError('only MoleculeContainer-MoleculeContainer or MoleculeContainer-CGRContainer possible')
return VF2 GraphMatcher MoleculeContainer < MoleculeContainer MoleculeContainer < CGRContainer
entailment
def to_datetime(date):
    """Turn a date into a datetime at midnight."""
    return datetime.datetime.combine(date, datetime.datetime.min.time())
Turn a date into a datetime at midnight.
entailment
def iter_size_changes(self, issue):
    """Yield an IssueSizeSnapshot for each time the issue size changed"""

    # Find the size changes, if any
    try:
        size_changes = list(filter(
            lambda h: h.field == 'Story Points',
            itertools.chain.from_iterable([c.items for c in issue.changelog.histories])))
    except AttributeError:
        return

    # If we have no size changes and the issue has a current size, then a size must
    # have been specified at issue creation time. Return the size at creation time.
    try:
        current_size = issue.fields.__dict__[self.fields['StoryPoints']]
    except (AttributeError, KeyError):  # the field may be missing entirely
        current_size = None

    size = size_changes[0].fromString if len(size_changes) else current_size

    # Issue was created
    yield IssueSizeSnapshot(
        change=None,
        key=issue.key,
        date=dateutil.parser.parse(issue.fields.created),
        size=size
    )

    for change in issue.changelog.histories:
        change_date = dateutil.parser.parse(change.created)
        for item in change.items:
            if item.field == 'Story Points':
                # StoryPoints value was changed
                size = item.toString
                yield IssueSizeSnapshot(
                    change=item.field,
                    key=issue.key,
                    date=change_date,
                    size=size
                )
Yield an IssueSizeSnapshot for each time the issue size changed
entailment
def iter_changes(self, issue, include_resolution_changes=True):
    """Yield an IssueSnapshot for each time the issue changed status or resolution"""

    is_resolved = False

    # Find the first status change, if any
    try:
        status_changes = list(filter(
            lambda h: h.field == 'status',
            itertools.chain.from_iterable([c.items for c in issue.changelog.histories])))
    except AttributeError:
        return

    last_status = status_changes[0].fromString if len(status_changes) > 0 else issue.fields.status.name
    last_resolution = None

    # Issue was created
    yield IssueSnapshot(
        change=None,
        key=issue.key,
        date=dateutil.parser.parse(issue.fields.created),
        status=last_status,
        resolution=None,
        is_resolved=is_resolved
    )

    for change in issue.changelog.histories:
        change_date = dateutil.parser.parse(change.created)

        resolutions = list(filter(lambda i: i.field == 'resolution', change.items))
        is_resolved = (resolutions[-1].to is not None) if len(resolutions) > 0 else is_resolved

        for item in change.items:
            if item.field == 'status':
                # Status was changed
                last_status = item.toString
                yield IssueSnapshot(
                    change=item.field,
                    key=issue.key,
                    date=change_date,
                    status=last_status,
                    resolution=last_resolution,
                    is_resolved=is_resolved
                )
            elif item.field == 'resolution':
                last_resolution = item.toString
                if include_resolution_changes:
                    yield IssueSnapshot(
                        change=item.field,
                        key=issue.key,
                        date=change_date,
                        status=last_status,
                        resolution=last_resolution,
                        is_resolved=is_resolved
                    )
Yield an IssueSnapshot for each time the issue changed status or resolution
entailment
def find_issues(self, criteria=None, jql=None, order='KEY ASC', verbose=False, changelog=True):
    """Return a list of issues with changelog metadata.

    Searches for the `issue_types`, `project`, `valid_resolutions` and
    'jql_filter' set in the passed-in `criteria` object.

    Pass a JQL string to further qualify the query results.
    """
    criteria = criteria or {}  # avoid a mutable default argument
    query = []

    if criteria.get('project', False):
        query.append('project IN (%s)' % ', '.join(['"%s"' % p for p in criteria['project']]))
    if criteria.get('issue_types', False):
        query.append('issueType IN (%s)' % ', '.join(['"%s"' % t for t in criteria['issue_types']]))
    if criteria.get('valid_resolutions', False):
        query.append('(resolution IS EMPTY OR resolution IN (%s))' %
                     ', '.join(['"%s"' % r for r in criteria['valid_resolutions']]))
    if criteria.get('jql_filter') is not None:
        query.append('(%s)' % criteria['jql_filter'])
    if jql is not None:
        query.append('(%s)' % jql)

    queryString = "%s ORDER BY %s" % (' AND '.join(query), order)

    if verbose:
        print("Fetching issues with query:", queryString)

    fromRow = 0
    issues = []
    while True:
        try:
            if changelog:
                pageofissues = self.jira.search_issues(
                    queryString, expand='changelog',
                    maxResults=self.settings['max_results'], startAt=fromRow)
            else:
                pageofissues = self.jira.search_issues(
                    queryString, maxResults=self.settings['max_results'], startAt=fromRow)
            fromRow = fromRow + int(self.settings['max_results'])
            issues += pageofissues
            if verbose:
                print("Got %s lines per jira query from result starting at line number %s"
                      % (self.settings['max_results'], fromRow))
            if len(pageofissues) == 0:
                break
        except JIRAError as e:
            print("Jira query error with: {}\n{}".format(queryString, e))
            return []

    if verbose:
        print("Fetched", len(issues), "issues")

    return issues
Return a list of issues with changelog metadata. Searches for the `issue_types`, `project`, `valid_resolutions` and 'jql_filter' set in the passed-in `criteria` object. Pass a JQL string to further qualify the query results.
entailment
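A hedged usage sketch for the query builder above; the `client` object and all field values are hypothetical, and only the keys actually read by find_issues are shown:

    criteria = {
        'project': ['PROJ'],
        'issue_types': ['Story', 'Bug'],
        'valid_resolutions': ['Done'],
        'jql_filter': 'labels = "backend"',
    }
    # client is assumed to be an instance of the class defining find_issues
    issues = client.find_issues(criteria, jql='updated >= -30d', verbose=True)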
def list_catalogs(self):
    """
    Lists existing catalogs with respect to the UI view template format.
    """
    _form = CatalogSelectForm(current=self.current)
    _form.set_choices_of('catalog', [(i, i) for i in fixture_bucket.get_keys()])
    self.form_out(_form)
Lists existing catalogs with respect to the UI view template format.
entailment
def get_catalog(self):
    """
    Get existing catalog and fill the form with the model data.

    If given key not found as catalog, it generates an empty catalog data form.
    """
    catalog_data = fixture_bucket.get(self.input['form']['catalog'])

    # define add or edit based on whether catalog data exists
    add_or_edit = "Edit" if catalog_data.exists else "Add"

    # generate form
    catalog_edit_form = CatalogEditForm(
        current=self.current,
        title='%s: %s' % (add_or_edit, self.input['form']['catalog']))

    # add model data to form
    if catalog_data.exists:
        if isinstance(catalog_data.data, list):
            # if catalog data is an array it means no other language of value is
            # defined, therefore the value is turkish
            for key, data in enumerate(catalog_data.data):
                catalog_edit_form.CatalogDatas(catalog_key=key or "0", en='', tr=data)
        if isinstance(catalog_data.data, dict):
            for key, data in catalog_data.data.items():
                catalog_edit_form.CatalogDatas(catalog_key=key, en=data['en'], tr=data['tr'])
    else:
        catalog_edit_form.CatalogDatas(catalog_key="0", en='', tr='')

    self.form_out(catalog_edit_form)
    # schema key to get back which key will be saved, used in the save_catalog form
    self.output["object_key"] = self.input['form']['catalog']
Get existing catalog and fill the form with the model data. If given key not found as catalog, it generates an empty catalog data form.
entailment
def save_catalog(self):
    """
    Saves the catalog data to given key

    Cancels if the cmd is cancel

    Notifies user with the process.
    """
    if self.input["cmd"] == 'save_catalog':
        try:
            edited_object = dict()
            for i in self.input["form"]["CatalogDatas"]:
                edited_object[i["catalog_key"]] = {"en": i["en"], "tr": i["tr"]}

            newobj = fixture_bucket.get(self.input["object_key"])
            newobj.data = edited_object
            newobj.store()

            # notify user by passing notify in output object
            self.output["notify"] = "catalog: %s successfully updated." % self.input["object_key"]
        except Exception:  # narrowed from a bare except so system exits pass through
            raise HTTPError(500, "Form object could not be saved")
    if self.input["cmd"] == 'cancel':
        self.output["notify"] = "catalog: %s canceled." % self.input["object_key"]
Saves the catalog data to given key Cancels if the cmd is cancel Notifies user with the process.
entailment
def date_to_solr(d):
    """ converts DD-MM-YYYY to YYYY-MM-DDT00:00:00Z"""
    return "{y}-{m}-{day}T00:00:00Z".format(day=d[:2], m=d[3:5], y=d[6:]) if d else d
converts DD-MM-YYYY to YYYY-MM-DDT00:00:00Z
entailment
def solr_to_date(d):
    """ converts YYYY-MM-DDT00:00:00Z to DD-MM-YYYY """
    # use dashes, not colons, to match the documented DD-MM-YYYY format
    return "{day}-{m}-{y}".format(y=d[:4], m=d[5:7], day=d[8:10]) if d else d
converts YYYY-MM-DDT00:00:00Z to DD-MM-YYYY
entailment
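A round-trip check of the two converters above, assuming the dash fix in solr_to_date:

    assert date_to_solr('15-01-2024') == '2024-01-15T00:00:00Z'
    assert solr_to_date('2024-01-15T00:00:00Z') == '15-01-2024'
    assert date_to_solr('') == ''  # falsy inputs pass through unchanged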
def to_safe_str(s):
    """
    converts some (tr) non-ascii chars to ascii counterparts,
    then return the result as lowercase
    """
    # TODO: This is insufficient as it doesn't do anything for other non-ascii chars
    transliterated = s.strip().replace(u'ğ', 'g').replace(u'Ğ', 'g').replace(
        u'ç', 'c').replace(u'Ç', 'c').replace(u'ö', 'o').replace(u'Ö', 'o').replace(
        u'ş', 's').replace(u'Ş', 's').replace(u'ü', 'u').replace(u'Ü', 'u').replace(
        u'ı', 'i').replace(u'İ', 'i').lower()
    # NOTE: re.UNICODE must be passed as `flags`; the original passed it as the
    # positional `count` argument, which silently limited the number of substitutions
    return re.sub(r'[^0-9a-zA-Z]+', '_', transliterated, flags=re.UNICODE)
converts some (tr) non-ascii chars to ascii counterparts, then return the result as lowercase
entailment
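A couple of expected outputs for the fixed version (the sample strings are hypothetical):

    assert to_safe_str(u'Çiçek Sepeti') == 'cicek_sepeti'
    assert to_safe_str(u'  Göğüs Üstü ') == 'gogus_ustu'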
def merge_truthy(*dicts):
    """Merge multiple dictionaries, keeping the truthy values in case of key collisions.

    Accepts any number of dictionaries, or any other object that returns a 2-tuple
    of key and value pairs when its `.items()` method is called.

    If a key exists in multiple dictionaries passed to this function, the values
    from the latter dictionary is kept. If the value of the latter dictionary does
    not evaluate to True, then the value of the previous dictionary is kept.

    >>> merge_truthy({'a': 1, 'c': 4}, {'a': None, 'b': 2}, {'b': 3})
    {'a': 1, 'b': 3, 'c': 4}
    """
    merged = {}
    for d in dicts:
        for k, v in d.items():
            merged[k] = v or merged.get(k, v)
    return merged
Merge multiple dictionaries, keeping the truthy values in case of key collisions. Accepts any number of dictionaries, or any other object that returns a 2-tuple of key and value pairs when its `.items()` method is called. If a key exists in multiple dictionaries passed to this function, the values from the latter dictionary is kept. If the value of the latter dictionary does not evaluate to True, then the value of the previous dictionary is kept. >>> merge_truthy({'a': 1, 'c': 4}, {'a': None, 'b': 2}, {'b': 3}) {'a': 1, 'b': 3, 'c': 4}
entailment
def perform(self):
    """Perform the version upgrade on the database."""
    db_versions = self.table.versions()

    version = self.version
    if (version.is_processed(db_versions)
            and not self.config.force_version == self.version.number):
        self.log(u'version {} is already installed'.format(version.number))
        return

    self.start()
    try:
        self._perform_version(version)
    except Exception:
        if sys.version_info < (3, 4):
            msg = traceback.format_exc().decode('utf8', errors='ignore')
        else:
            msg = traceback.format_exc()
        error = u'\n'.join(self.logs + [u'\n', msg])
        self.table.record_log(version.number, error)
        raise

    self.finish()
Perform the version upgrade on the database.
entailment
def _perform_version(self, version):
    """Inner method for version upgrade.

    Not intended for standalone use. This method performs the actual version
    upgrade with all the pre, post operations and addons upgrades.

    :param version: The migration version to upgrade to
    :type version: Instance of Version class
    """
    if version.is_noop():
        self.log(u'version {} is a noop'.format(version.number))
        return

    self.log(u'execute base pre-operations')
    for operation in version.pre_operations():
        operation.execute(self.log)

    if self.config.mode:
        self.log(u'execute %s pre-operations' % self.config.mode)
        for operation in version.pre_operations(mode=self.config.mode):
            operation.execute(self.log)

    self.perform_addons()

    self.log(u'execute base post-operations')
    for operation in version.post_operations():
        operation.execute(self.log)

    if self.config.mode:
        self.log(u'execute %s post-operations' % self.config.mode)
        for operation in version.post_operations(self.config.mode):
            operation.execute(self.log)
Inner method for version upgrade. Not intended for standalone use. This method performs the actual version upgrade with all the pre, post operations and addons upgrades. :param version: The migration version to upgrade to :type version: Instance of Version class
entailment
def logout(current):
    """
    Log out view. Simply deletes the session object.

    For showing logout message:
    'show_logout_message' field should be True in current.task_data,
    Message should be sent in current.task_data with 'logout_message' field.
    Message title should be sent in current.task_data with 'logout_title' field.

        current.task_data['show_logout_message'] = True
        current.task_data['logout_title'] = 'Message Title'
        current.task_data['logout_message'] = 'Message'

    Args:
        current: :attr:`~zengine.engine.WFCurrent` object.
    """
    current.user.is_online(False)
    current.session.delete()
    current.output['cmd'] = 'logout'
    if current.task_data.get('show_logout_message', False):
        current.output['title'] = current.task_data.get('logout_title', None)
        current.output['msg'] = current.task_data.get('logout_message', None)
Log out view. Simply deletes the session object. For showing logout message: 'show_logout_message' field should be True in current.task_data, Message should be sent in current.task_data with 'logout_message' field. Message title should be sent in current.task_data with 'logout_title' field. current.task_data['show_logout_message'] = True current.task_data['logout_title'] = 'Message Title' current.task_data['logout_message'] = 'Message' Args: current: :attr:`~zengine.engine.WFCurrent` object.
entailment
def _do_upgrade(self):
    """ open websocket connection """
    self.current.output['cmd'] = 'upgrade'
    self.current.output['user_id'] = self.current.user_id
    self.terminate_existing_login()
    self.current.user.bind_private_channel(self.current.session.sess_id)
    user_sess = UserSessionID(self.current.user_id)
    user_sess.set(self.current.session.sess_id)
    self.current.user.is_online(True)
    # Clean up the locale from session to allow it to be re-read
    # from the user preferences after login
    for k in translation.DEFAULT_PREFS.keys():
        self.current.session[k] = ''
open websocket connection
entailment
def do_view(self):
    """
    Authenticate user with given credentials.
    Connects user's queue and exchange.
    """
    self.current.output['login_process'] = True
    self.current.task_data['login_successful'] = False
    if self.current.is_auth:
        self._do_upgrade()
    else:
        try:
            auth_result = self.current.auth.authenticate(
                self.current.input['username'],
                self.current.input['password'])
            self.current.task_data['login_successful'] = auth_result
            if auth_result:
                self._do_upgrade()
        except ObjectDoesNotExist:
            self.current.log.exception("Wrong username or another error occurred")

        if self.current.output.get('cmd') != 'upgrade':
            self.current.output['status_code'] = 403
        else:
            KeepAlive(self.current.user_id).reset()
Authenticate user with given credentials. Connects user's queue and exchange
entailment
def show_view(self):
    """
    Show :attr:`LoginForm` form.
    """
    self.current.output['login_process'] = True
    if self.current.is_auth:
        self._do_upgrade()
    else:
        self.current.output['forms'] = LoginForm(current=self.current).serialize()
Show :attr:`LoginForm` form.
entailment
def skip(mapping):
    """
    :param mapping: generator
    :return: filtered generator
    """
    found = set()
    for m in mapping:
        matched_atoms = set(m.values())
        if found.intersection(matched_atoms):
            continue
        found.update(matched_atoms)
        yield m
:param mapping: generator :return: filtered generator
entailment
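A minimal illustration of the duplicate-filtering semantics of skip(): a mapping is dropped as soon as it reuses any already-matched atom (the numbers below are hypothetical atom ids):

    def mappings():
        yield {1: 10, 2: 11}
        yield {3: 11, 4: 12}  # reuses atom 11, so it is skipped
        yield {5: 13}

    assert list(skip(mappings())) == [{1: 10, 2: 11}, {5: 13}]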
def __get_mapping(self, structures):
    """
    match each pattern to each molecule.
    if all patterns match with all molecules return generator of all possible mapping.

    :param structures: disjoint molecules
    :return: mapping generator
    """
    for c in permutations(structures, len(self.__patterns)):
        for m in product(*(x.get_substructure_mapping(y, limit=0)
                           for x, y in zip(self.__patterns, c))):
            mapping = {}
            for i in m:
                mapping.update(i)
            if mapping:
                yield mapping
match each pattern to each molecule. if all patterns match with all molecules return generator of all possible mapping. :param structures: disjoint molecules :return: mapping generator
entailment
def get(self, default=None):
    """
    return the cached value or default if it can't be found

    :param default: default value
    :return: cached value
    """
    d = cache.get(self.key)
    return ((json.loads(d.decode('utf-8')) if self.serialize else d)
            if d is not None else default)
return the cached value or default if it can't be found :param default: default value :return: cached value
entailment
def set(self, val, lifetime=None):
    """
    set cache value

    :param val: any picklable object
    :param lifetime: expiration time in sec
    :return: val
    """
    cache.set(self.key,
              (json.dumps(val) if self.serialize else val),
              lifetime or settings.DEFAULT_CACHE_EXPIRE_TIME)
    return val
set cache value :param val: any picklable object :param lifetime: expiration time in sec :return: val
entailment
def add(self, val):
    """
    Add given value to item (list)

    Args:
        val: A JSON serializable object.

    Returns:
        Cache backend response.
    """
    return cache.lpush(self.key, json.dumps(val) if self.serialize else val)
Add given value to item (list) Args: val: A JSON serializable object. Returns: Cache backend response.
entailment
def get_all(self):
    """
    Get all list items.

    Returns:
        Cache backend response.
    """
    result = cache.lrange(self.key, 0, -1)
    return ((json.loads(item.decode('utf-8')) for item in result if item)
            if self.serialize else result)
Get all list items. Returns: Cache backend response.
entailment
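The add() and get_all() methods above appear to be thin wrappers over Redis LPUSH/LRANGE, so the backing semantics can be sketched with a plain redis-py client (the key name is hypothetical; note that LPUSH prepends, so results come back newest-first):

    import json
    import redis

    r = redis.Redis()
    r.delete('demo:list')
    r.lpush('demo:list', json.dumps({'msg': 'first'}))
    r.lpush('demo:list', json.dumps({'msg': 'second'}))
    items = [json.loads(raw.decode('utf-8')) for raw in r.lrange('demo:list', 0, -1)]
    assert [i['msg'] for i in items] == ['second', 'first']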
def remove_item(self, val):
    """
    Removes given item from the list.

    Args:
        val: Item

    Returns:
        Cache backend response.
    """
    # honor the serialize flag, as the other list methods of this class do
    return cache.lrem(self.key, json.dumps(val) if self.serialize else val)
Removes given item from the list. Args: val: Item Returns: Cache backend response.
entailment
def flush(cls, *args):
    """
    Removes all keys of this namespace

    Without args, clears all keys starting with cls.PREFIX;
    if called with args, clears keys starting with given cls.PREFIX + args.

    Args:
        *args: Arbitrary number of arguments.

    Returns:
        List of removed keys.
    """
    return _remove_keys([], [(cls._make_key(args) if args else cls.PREFIX) + '*'])
Removes all keys of this namespace Without args, clears all keys starting with cls.PREFIX if called with args, clears keys starting with given cls.PREFIX + args Args: *args: Arbitrary number of arguments. Returns: List of removed keys.
entailment
def update_or_expire_session(self):
    """
    Deletes session if keepalive request expired,
    otherwise updates the keepalive timestamp value.
    """
    if not hasattr(self, 'key'):
        return
    now = time.time()
    timestamp = float(self.get() or 0) or now
    sess_id = self.sess_id or UserSessionID(self.user_id).get()
    if sess_id and now - timestamp > self.SESSION_EXPIRE_TIME:
        Session(sess_id).delete()
        return False
    else:
        self.set(now)
        return True
Deletes session if keepalive request expired otherwise updates the keepalive timestamp value
entailment
def send_message_for_lane_change(sender, **kwargs):
    """
    Sends a message to possible owners of the current workflow's next lane.

    Args:
        **kwargs: ``current`` and ``possible_owners`` are required.
        sender (User): User object
    """
    current = kwargs['current']
    owners = kwargs['possible_owners']
    if 'lane_change_invite' in current.task_data:
        msg_context = current.task_data.pop('lane_change_invite')
    else:
        msg_context = DEFAULT_LANE_CHANGE_INVITE_MSG

    wfi = WFCache(current).get_instance()
    # Deletion of used passive task invitation which belongs to previous lane.
    TaskInvitation.objects.filter(instance=wfi, role=current.role, wf_name=wfi.wf.name).delete()
    today = datetime.today()
    for recipient in owners:
        inv = TaskInvitation(
            instance=wfi,
            role=recipient,
            wf_name=wfi.wf.name,
            progress=30,
            start_date=today,
            finish_date=today + timedelta(15)
        )
        inv.title = current.task_data.get('INVITATION_TITLE') or wfi.wf.title
        inv.save()
        # try to send notification, if it fails go on
        try:
            recipient.send_notification(
                title=msg_context['title'],
                message="%s %s" % (wfi.wf.title, msg_context['body']),
                typ=1,  # info
                url='',
                sender=sender
            )
        except Exception:  # todo: specify which exception
            pass
Sends a message to possible owners of the current workflow's next lane. Args: **kwargs: ``current`` and ``possible_owners`` are required. sender (User): User object
entailment
def set_password(sender, **kwargs):
    """
    Encrypts password of the user.
    """
    if sender.model_class.__name__ == 'User':
        usr = kwargs['object']
        if not usr.password.startswith('$pbkdf2'):
            usr.set_password(usr.password)
            usr.save()
Encrypts password of the user.
entailment
def channel_list(self):
    """
    Main screen for channel management.
    Channels listed and operations can be chosen on the screen.
    If there is an error message like non-choice, it is shown here.
    """
    if self.current.task_data.get('msg', False):
        if self.current.task_data.get('target_channel_key', False):
            self.current.output['msgbox'] = {
                'type': 'info',
                "title": _(u"Successful Operation"),
                "msg": self.current.task_data['msg']}
            del self.current.task_data['msg']
        else:
            self.show_warning_messages()

    self.current.task_data['new_channel'] = False
    _form = ChannelListForm(title=_(u'Public Channel List'), help_text=CHANNEL_CHOICE_TEXT)
    for channel in Channel.objects.filter(typ=15):
        owner_name = channel.owner.username
        _form.ChannelList(choice=False, name=channel.name, owner=owner_name, key=channel.key)
    _form.new_channel = fields.Button(_(u"Merge At New Channel"), cmd="create_new_channel")
    _form.existing_channel = fields.Button(_(u"Merge With An Existing Channel"),
                                           cmd="choose_existing_channel")
    _form.find_chosen_channel = fields.Button(_(u"Split Channel"), cmd="find_chosen_channel")
    self.form_out(_form)
Main screen for channel management. Channels listed and operations can be chosen on the screen. If there is an error message like non-choice, it is shown here.
entailment
def channel_choice_control(self):
    """
    It controls errors. If there is an error,
    returns channel list screen with error message.
    """
    self.current.task_data['control'], self.current.task_data['msg'] = \
        self.selection_error_control(self.input['form'])
    if self.current.task_data['control']:
        self.current.task_data['option'] = self.input['cmd']
        self.current.task_data['split_operation'] = False
        keys, names = self.return_selected_form_items(self.input['form']['ChannelList'])
        self.current.task_data['chosen_channels'] = keys
        self.current.task_data['chosen_channels_names'] = names
It controls errors. If there is an error, returns channel list screen with error message.
entailment
def create_new_channel(self):
    """
    Features of new channel are specified like channel's name, owner etc.
    """
    self.current.task_data['new_channel'] = True
    _form = NewChannelForm(Channel(), current=self.current)
    _form.title = _(u"Specify Features of New Channel to Create")
    _form.forward = fields.Button(_(u"Create"), flow="find_target_channel")
    self.form_out(_form)
Features of new channel are specified like channel's name, owner etc.
entailment
def save_new_channel(self):
    """
    It saves new channel according to specified channel features.
    """
    form_info = self.input['form']
    channel = Channel(typ=15, name=form_info['name'],
                      description=form_info['description'],
                      owner_id=form_info['owner_id'])
    channel.blocking_save()
    self.current.task_data['target_channel_key'] = channel.key
It saves new channel according to specified channel features.
entailment
def choose_existing_channel(self):
    """
    It is a channel choice list and chosen channels at previous step
    shouldn't be on the screen.
    """
    if self.current.task_data.get('msg', False):
        self.show_warning_messages()

    _form = ChannelListForm()
    _form.title = _(u"Choose a Channel Which Will Be Merged With Chosen Channels")
    for channel in Channel.objects.filter(typ=15).exclude(
            key__in=self.current.task_data['chosen_channels']):
        owner_name = channel.owner.username
        _form.ChannelList(choice=False, name=channel.name, owner=owner_name, key=channel.key)
    _form.choose = fields.Button(_(u"Choose"))
    self.form_out(_form)
It is a channel choice list and chosen channels at previous step shouldn't be on the screen.
entailment
def existing_choice_control(self):
    """
    It controls errors. It generates an error message
    if zero or more than one channels are selected.
    """
    self.current.task_data['existing'] = False
    self.current.task_data['msg'] = _(u"You should choose just one channel to do operation.")
    keys, names = self.return_selected_form_items(self.input['form']['ChannelList'])
    if len(keys) == 1:
        self.current.task_data['existing'] = True
        self.current.task_data['target_channel_key'] = keys[0]
It controls errors. It generates an error message if zero or more than one channels are selected.
entailment
def split_channel(self):
    """
    A channel can be split to new channel or other existing channel.
    It creates subscribers list as selectable to be moved.
    """
    if self.current.task_data.get('msg', False):
        self.show_warning_messages()

    self.current.task_data['split_operation'] = True
    channel = Channel.objects.get(self.current.task_data['chosen_channels'][0])
    _form = SubscriberListForm(title=_(u'Choose Subscribers to Migrate'))
    for subscriber in Subscriber.objects.filter(channel=channel):
        subscriber_name = subscriber.user.username
        _form.SubscriberList(choice=False, name=subscriber_name, key=subscriber.key)
    _form.new_channel = fields.Button(_(u"Move to a New Channel"), cmd="create_new_channel")
    _form.existing_channel = fields.Button(_(u"Move to an Existing Channel"),
                                           cmd="choose_existing_channel")
    self.form_out(_form)
A channel can be split to new channel or other existing channel. It creates subscribers list as selectable to be moved.
entailment
def subscriber_choice_control(self):
    """
    It controls subscribers choice and generates error message
    if there is a non-choice.
    """
    self.current.task_data['option'] = None
    self.current.task_data['chosen_subscribers'], names = self.return_selected_form_items(
        self.input['form']['SubscriberList'])
    self.current.task_data['msg'] = \
        "You should choose at least one subscriber for migration operation."
    if self.current.task_data['chosen_subscribers']:
        self.current.task_data['option'] = self.input['cmd']
        del self.current.task_data['msg']
It controls subscribers choice and generates error message if there is a non-choice.
entailment
def move_complete_channel(self):
    """
    Channels and their subscribers are moved completely
    to new channel or existing channel.
    """
    to_channel = Channel.objects.get(self.current.task_data['target_channel_key'])
    chosen_channels = self.current.task_data['chosen_channels']
    chosen_channels_names = self.current.task_data['chosen_channels_names']

    with BlockSave(Subscriber, query_dict={'channel_id': to_channel.key}):
        for s in Subscriber.objects.filter(channel_id__in=chosen_channels, typ=15):
            s.channel = to_channel
            s.save()

    with BlockDelete(Message):
        Message.objects.filter(channel_id__in=chosen_channels, typ=15).delete()

    with BlockDelete(Channel):
        Channel.objects.filter(key__in=chosen_channels).delete()

    self.current.task_data['msg'] = _(
        u"Chosen channels(%s) have been merged to '%s' channel successfully.") % (
        ', '.join(chosen_channels_names), to_channel.name)
Channels and their subscribers are moved completely to new channel or existing channel.
entailment
def move_chosen_subscribers(self):
    """
    After splitting operation, only chosen subscribers are moved
    to new channel or existing channel.
    """
    from_channel = Channel.objects.get(self.current.task_data['chosen_channels'][0])
    to_channel = Channel.objects.get(self.current.task_data['target_channel_key'])

    with BlockSave(Subscriber, query_dict={'channel_id': to_channel.key}):
        for subscriber in Subscriber.objects.filter(
                key__in=self.current.task_data['chosen_subscribers']):
            subscriber.channel = to_channel
            subscriber.save()

    if self.current.task_data['new_channel']:
        self.copy_and_move_messages(from_channel, to_channel)

    self.current.task_data['msg'] = _(
        u"Chosen subscribers and messages of them migrated from '%s' channel to "
        u"'%s' channel successfully.") % (from_channel.name, to_channel.name)
After splitting operation, only chosen subscribers are moved to new channel or existing channel.
entailment
@staticmethod  # presumably static: takes no self and is called via self.copy_and_move_messages(...)
def copy_and_move_messages(from_channel, to_channel):
    """
    While splitting channel and moving chosen subscribers to new channel,
    old channel's messages are copied and moved to new channel.

    Args:
        from_channel (Channel object): move messages from channel
        to_channel (Channel object): move messages to channel
    """
    with BlockSave(Message, query_dict={'channel_id': to_channel.key}):
        for message in Message.objects.filter(channel=from_channel, typ=15):
            message.key = ''
            message.channel = to_channel
            message.save()
While splitting channel and moving chosen subscribers to new channel, old channel's messages are copied and moved to new channel. Args: from_channel (Channel object): move messages from channel to_channel (Channel object): move messages to channel
entailment
def show_warning_messages(self, title=_(u"Incorrect Operation"), box_type='warning'):
    """
    It shows incorrect operations or successful operation messages.

    Args:
        title (string): title of message box
        box_type (string): type of message box (warning, info)
    """
    msg = self.current.task_data['msg']
    self.current.output['msgbox'] = {'type': box_type, "title": title, "msg": msg}
    del self.current.task_data['msg']
It shows incorrect operations or successful operation messages. Args: title (string): title of message box box_type (string): type of message box (warning, info)
entailment
@staticmethod  # presumably static: takes no self and is called via self.return_selected_form_items(...)
def return_selected_form_items(form_info):
    """
    It returns chosen keys list from a given form.

    Args:
        form_info: serialized list of dict form data

    Returns:
        selected_keys (list): Chosen keys list
        selected_names (list): Chosen channels' or subscribers' names.
    """
    selected_keys = []
    selected_names = []
    for chosen in form_info:
        if chosen['choice']:
            selected_keys.append(chosen['key'])
            selected_names.append(chosen['name'])
    return selected_keys, selected_names
It returns chosen keys list from a given form. Args: form_info: serialized list of dict form data Returns: selected_keys(list): Chosen keys list selected_names(list): Chosen channels' or subscribers' names.
entailment
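A small self-contained example of the selection helper above, with hypothetical form rows:

    form_rows = [
        {'choice': True, 'name': 'general', 'key': 'K1'},
        {'choice': False, 'name': 'random', 'key': 'K2'},
        {'choice': True, 'name': 'dev', 'key': 'K3'},
    ]
    keys, names = return_selected_form_items(form_rows)
    assert keys == ['K1', 'K3'] and names == ['general', 'dev']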
def selection_error_control(self, form_info):
    """
    It controls the selection from the form according to the operations,
    and returns an error message if it does not comply with the rules.

    Args:
        form_info: Channel or subscriber form from the user

    Returns:
        (True or False, error message) tuple
    """
    keys, names = self.return_selected_form_items(form_info['ChannelList'])
    chosen_channels_number = len(keys)

    if form_info['new_channel'] and chosen_channels_number < 2:
        return False, _(
            u"You should choose at least two channels to merge operation at a new channel.")
    elif form_info['existing_channel'] and chosen_channels_number == 0:
        return False, _(
            u"You should choose at least one channel to merge operation with existing channel.")
    elif form_info['find_chosen_channel'] and chosen_channels_number != 1:
        return False, _(u"You should choose one channel for split operation.")

    return True, None
It controls the selection from the form according to the operations, and returns an error message if it does not comply with the rules. Args: form_info: Channel or subscriber form from the user Returns: (True or False, error message) tuple
entailment
def _eratosthenes():
    """Yields the sequence of prime numbers via the Sieve of Eratosthenes."""
    d = {}  # map each composite integer to its first-found prime factor
    for q in count(2):  # q gets 2, 3, 4, 5, ... ad infinitum
        p = d.pop(q, None)
        if p is None:
            # q not a key in d, so q is prime, therefore, yield it
            yield q
            # mark q squared as not-prime (with q as first-found prime factor)
            d[q * q] = q
        else:
            # let x <- smallest (N*p)+q which wasn't yet known to be composite
            # we just learned x is composite, with p first-found prime factor,
            # since p is the first-found prime factor of q -- find and mark it
            x = p + q
            while x in d:
                x += p
            d[x] = p
Yields the sequence of prime numbers via the Sieve of Eratosthenes.
entailment
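The generator above is unbounded, so it pairs naturally with itertools.islice; a quick check of the first few primes (the function itself assumes `from itertools import count`):

    from itertools import islice

    assert list(islice(_eratosthenes(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]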
def atoms_order(self):
    """
    Morgan like algorithm for graph nodes ordering

    :return: dict of atom-weight pairs
    """
    if not len(self):  # for empty containers
        return {}
    elif len(self) == 1:  # optimize single atom containers
        return dict.fromkeys(self, 2)

    params = {n: (int(node), tuple(sorted(int(edge) for edge in self._adj[n].values())))
              for n, node in self.atoms()}
    newlevels = {}
    countprime = iter(primes)
    weights = {x: newlevels.get(y) or newlevels.setdefault(y, next(countprime))
               for x, y in sorted(params.items(), key=itemgetter(1))}

    tries = len(self) * 4
    numb = len(set(weights.values()))
    stab = 0

    while tries:
        oldnumb = numb
        neweights = {}
        countprime = iter(primes)

        # weights[n] ** 2 NEED for differentiation of molecules like A-B
        # or any other complete graphs.
        tmp = {n: reduce(mul, (weights[x] for x in m), weights[n] ** 2)
               for n, m in self._adj.items()}

        weights = {x: (neweights.get(y) or neweights.setdefault(y, next(countprime)))
                   for x, y in sorted(tmp.items(), key=itemgetter(1))}

        numb = len(set(weights.values()))
        if numb == len(self):  # each atom now unique
            break
        elif numb == oldnumb:
            x = Counter(weights.values())
            if x[min(x)] > 1:
                if stab == 3:
                    break
            elif stab >= 2:
                break
            stab += 1
        elif stab:
            stab = 0

        tries -= 1
        if not tries and numb < oldnumb:
            warning('morgan. number of attempts exceeded. uniqueness has decreased. '
                    'next attempt will be made')
            tries = 1
    else:
        warning('morgan. number of attempts exceeded')

    return weights
Morgan like algorithm for graph nodes ordering :return: dict of atom-weight pairs
entailment
def init_manual(cls, pawn_value, knight_value, bishop_value, rook_value, queen_value, king_value):
    """
    Manual init method for external piece values

    :type: PAWN_VALUE: int
    :type: KNIGHT_VALUE: int
    :type: BISHOP_VALUE: int
    :type: ROOK_VALUE: int
    :type: QUEEN_VALUE: int
    :type: KING_VALUE: int
    """
    piece_values = cls()
    piece_values.PAWN_VALUE = pawn_value
    piece_values.KNIGHT_VALUE = knight_value
    piece_values.BISHOP_VALUE = bishop_value
    piece_values.ROOK_VALUE = rook_value
    piece_values.QUEEN_VALUE = queen_value
    piece_values.KING_VALUE = king_value
    return piece_values
Manual init method for external piece values :type: PAWN_VALUE: int :type: KNIGHT_VALUE: int :type: BISHOP_VALUE: int :type: ROOK_VALUE: int :type: QUEEN_VALUE: int :type: KING_VALUE: int
entailment
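A usage sketch for the alternate constructor above; the class name PieceValues is an assumption, since the defining class is not shown in this row:

    values = PieceValues.init_manual(
        pawn_value=1, knight_value=3, bishop_value=3,
        rook_value=5, queen_value=9, king_value=0)
    assert values.QUEEN_VALUE == 9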
def val(self, piece, ref_color):
    """
    Finds value of ``Piece``

    :type: piece: Piece
    :type: ref_color: Color
    :rtype: int
    """
    if piece is None:
        return 0
    if ref_color == piece.color:
        const = 1
    else:
        const = -1
    if isinstance(piece, Pawn):
        return self.PAWN_VALUE * const
    elif isinstance(piece, Queen):
        return self.QUEEN_VALUE * const
    elif isinstance(piece, Bishop):
        return self.BISHOP_VALUE * const
    elif isinstance(piece, Rook):
        return self.ROOK_VALUE * const
    elif isinstance(piece, Knight):
        return self.KNIGHT_VALUE * const
    elif isinstance(piece, King):
        return self.KING_VALUE * const
    return 0
Finds value of ``Piece`` :type: piece: Piece :type: ref_color: Color :rtype: int
entailment
def play(self):
    """
    Starts game and returns one of 3 results.
    Iterates between methods ``white_move()`` and ``black_move()`` until game ends.
    Each method calls the respective player's ``generate_move()`` method.

    :rtype: int or float
    """
    colors = [lambda: self.white_move(), lambda: self.black_move()]
    colors = itertools.cycle(colors)
    while True:
        color_fn = next(colors)
        if game_state.no_moves(self.position):
            if self.position.get_king(color.white).in_check(self.position):
                return 1
            elif self.position.get_king(color.black).in_check(self.position):
                return 0
            else:
                return 0.5  # stalemate: neither king in check, draw
        color_fn()
Starts game and returns one of 3 results. Iterates between methods ``white_move()`` and ``black_move()`` until game ends. Each method calls the respective player's ``generate_move()`` method. :rtype: int or float
entailment
def white_move(self):
    """
    Calls the white player's ``generate_move()`` method and updates
    the board with the move returned.
    """
    move = self.player_white.generate_move(self.position)
    move = make_legal(move, self.position)
    self.position.update(move)
Calls the white player's ``generate_move()`` method and updates the board with the move returned.
entailment
def black_move(self):
    """
    Calls the black player's ``generate_move()`` method and updates
    the board with the move returned.
    """
    move = self.player_black.generate_move(self.position)
    move = make_legal(move, self.position)
    self.position.update(move)
Calls the black player's ``generate_move()`` method and updates the board with the move returned.
entailment
def get_field_cache(self, cache_type='es'):
    """Return a list of fields' mappings"""
    if cache_type == 'kibana':
        try:
            search_results = urlopen(self.get_url).read().decode('utf-8')
        except HTTPError:  # as e:
            # self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
            return []
        index_pattern = json.loads(search_results)
        # Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*",
        # "_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}}
        fields_str = index_pattern['_source']['fields']
        return json.loads(fields_str)
    elif cache_type == 'es' or cache_type.startswith('elastic'):
        search_results = urlopen(self.es_get_url).read().decode('utf-8')
        es_mappings = json.loads(search_results)
        # Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":
        # {"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date",
        # "index":"not_analyzed","doc_values":false,...}}}, ...
        # now convert the mappings into the .kibana format
        field_cache = []
        for (index_name, val) in iteritems(es_mappings):
            if index_name != self.index:  # only get non-'.kibana' indices
                m_dict = es_mappings[index_name]['mappings']
                mappings = self.get_index_mappings(m_dict)
                field_cache.extend(mappings)
        field_cache = self.dedup_field_cache(field_cache)
        return field_cache
    self.pr_err("Unknown cache type: %s" % cache_type)
    return None
Return a list of fields' mappings
entailment
def post_field_cache(self, field_cache):
    """Where field_cache is a list of fields' mappings"""
    index_pattern = self.field_cache_to_index_pattern(field_cache)
    resp = requests.post(self.post_url, data=index_pattern).text
    # resp looks like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*",
    # "_version":1,"created":true}
    resp = json.loads(resp)  # parsed only to validate the response body
    return 0
Where field_cache is a list of fields' mappings
entailment
def field_cache_to_index_pattern(self, field_cache):
    """Return a .kibana index-pattern doc_type"""
    mapping_dict = {}
    mapping_dict['customFormats'] = "{}"
    mapping_dict['title'] = self.index_pattern
    # now post the data into .kibana
    mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
    # in order to post, we need to create the post string
    mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
    return mapping_str
Return a .kibana index-pattern doc_type
entailment
def check_mapping(self, m):
    """Assert minimum set of fields in cache, does not validate contents"""
    if 'name' not in m:
        self.pr_dbg("Missing %s" % "name")
        return False
    for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
        if x not in m or m[x] == "":
            self.pr_dbg("Missing %s" % x)
            self.pr_dbg("Full %s" % m)
            return False
    if 'doc_values' not in m or m['doc_values'] == "":
        if not m['name'].startswith('_'):
            self.pr_dbg("Missing %s" % "doc_values")
            return False
        m['doc_values'] = False
    return True
Assert minimum set of fields in cache, does not validate contents
entailment
def get_index_mappings(self, index):
    """Converts all index's doc_types to .kibana"""
    fields_arr = []
    for (key, val) in iteritems(index):
        doc_mapping = self.get_doc_type_mappings(index[key])
        if doc_mapping is None:
            return None
        # keep adding to the fields array
        fields_arr.extend(doc_mapping)
    return fields_arr
Converts all index's doc_types to .kibana
entailment
def get_doc_type_mappings(self, doc_type):
    """Converts all doc_types' fields to .kibana"""
    doc_fields_arr = []
    found_score = False
    for (key, val) in iteritems(doc_type):
        add_it = False
        retdict = {}
        # _ are system fields
        if not key.startswith('_'):
            if 'mapping' not in doc_type[key]:
                self.pr_err("No mapping in doc_type[%s]" % key)
                return None
            if key in doc_type[key]['mapping']:
                subkey_name = key
            else:
                subkey_name = re.sub(r'.*\.', '', key)  # raw string avoids an invalid-escape warning
            if subkey_name not in doc_type[key]['mapping']:
                self.pr_err("Couldn't find subkey "
                            "doc_type[%s]['mapping'][%s]" % (key, subkey_name))
                return None
            retdict = self.get_field_mappings(doc_type[key]['mapping'][subkey_name])
            add_it = True
        # system mappings don't list a type, but kibana makes them all strings
        if key in self.sys_mappings:
            retdict['analyzed'] = False
            retdict['indexed'] = False
            if key == '_source':
                retdict = self.get_field_mappings(doc_type[key]['mapping'][key])
                retdict['type'] = "_source"
            elif key == '_score':
                retdict['type'] = "number"
                found_score = True  # avoid appending a duplicate _score below
            elif 'type' not in retdict:
                retdict['type'] = "string"
            add_it = True
        if add_it:
            retdict['name'] = key
            retdict['count'] = 0  # always init to 0
            retdict['scripted'] = False  # I haven't observed a True yet
            if not self.check_mapping(retdict):
                self.pr_err("Error, invalid mapping")
                return None
            # the fields element is an escaped array of json;
            # make the array here, after all collected, then escape it
            doc_fields_arr.append(retdict)
    if not found_score:
        doc_fields_arr.append(
            {"name": "_score", "type": "number", "count": 0, "scripted": False,
             "indexed": False, "analyzed": False, "doc_values": False})
    return doc_fields_arr
Converts all doc_types' fields to .kibana
entailment
def get_field_mappings(self, field):
    """Converts ES field mappings to .kibana field mappings"""
    retdict = {}
    retdict['indexed'] = False
    retdict['analyzed'] = False
    for (key, val) in iteritems(field):
        if key in self.mappings:
            if (key == 'type' and
                    (val == "long" or val == "integer" or
                     val == "double" or val == "float")):
                val = "number"
            retdict[key] = val
        if key == 'index' and val != "no":
            retdict['indexed'] = True
            if val == "analyzed":
                retdict['analyzed'] = True
    return retdict
Converts ES field mappings to .kibana field mappings
entailment
def is_kibana_cache_incomplete(self, es_cache, k_cache):
    """Test if k_cache is incomplete

    Assume k_cache is always correct, but could be missing
    new fields that es_cache has
    """
    # convert list into dict, with each item's ['name'] as key
    k_dict = {}
    for field in k_cache:
        k_dict[field['name']] = field
        for ign_f in self.mappings_ignore:
            k_dict[field['name']][ign_f] = 0
    es_dict = {}
    for field in es_cache:
        es_dict[field['name']] = field
        for ign_f in self.mappings_ignore:
            es_dict[field['name']][ign_f] = 0
    es_set = set(es_dict.keys())
    k_set = set(k_dict.keys())
    # reasons why kibana cache could be incomplete:
    # k_dict is missing keys that are within es_dict
    # We don't care if k has keys that es doesn't
    # es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
    # es {1,2} k {1,2};   intersection {1,2}; len(es-{}) 0
    # es {1,2} k {};      intersection {};    len(es-{}) 2
    # es {1,2} k {1};     intersection {1};   len(es-{}) 1
    # es {2,3} k {1};     intersection {};    len(es-{}) 2
    # es {2,3} k {1,2};   intersection {2};   len(es-{}) 1
    return len(es_set - k_set.intersection(es_set)) > 0
Test if k_cache is incomplete Assume k_cache is always correct, but could be missing new fields that es_cache has
entailment
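The set logic at the heart of is_kibana_cache_incomplete, reduced to field names only (the names are hypothetical):

    es_names = {'host', 'port', 'status'}  # fields ES knows about
    k_names = {'host', 'status'}           # fields in the Kibana cache
    # 'port' is in ES but missing from Kibana, so the cache is incomplete:
    assert len(es_names - k_names.intersection(es_names)) > 0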
def list_to_compare_dict(self, list_form):
    """Convert list into a data structure we can query easier"""
    compare_dict = {}
    for field in list_form:
        if field['name'] in compare_dict:
            self.pr_dbg("List has duplicate field %s:\n%s"
                        % (field['name'], compare_dict[field['name']]))
            if compare_dict[field['name']] != field:
                self.pr_dbg("And values are different:\n%s" % field)
            return None
        compare_dict[field['name']] = field
        for ign_f in self.mappings_ignore:
            compare_dict[field['name']][ign_f] = 0
    return compare_dict
Convert list into a data structure we can query easier
entailment
def compare_field_caches(self, replica, original):
    """Verify original is subset of replica"""
    if original is None:
        original = []
    if replica is None:
        replica = []
    self.pr_dbg("Comparing orig with %s fields to replica with %s fields"
                % (len(original), len(replica)))
    # convert list into dict, with each item's ['name'] as key
    orig = self.list_to_compare_dict(original)
    if orig is None:
        self.pr_dbg("Original has duplicate fields")
        return 1
    repl = self.list_to_compare_dict(replica)
    if repl is None:
        self.pr_dbg("Replica has duplicate fields")
        return 1
    # search orig for each item in repl
    # if any items in repl not within orig or vice versa, then complain
    # make sure contents of each item match
    orig_found = {}
    for (key, field) in iteritems(repl):
        field_name = field['name']
        if field_name not in orig:
            self.pr_dbg("Replica has field not found in orig %s: %s"
                        % (field_name, field))
            return 1
        orig_found[field_name] = True
        if orig[field_name] != field:
            self.pr_dbg("Field in replica doesn't match orig:")
            self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
            return 1
    # orig fields never seen while walking the replica
    # (orig_found - repl is always empty by construction, so diff against orig)
    unfound = set(orig.keys()) - set(orig_found.keys())
    if len(unfound) > 0:
        self.pr_dbg("Orig contains fields that were not in replica")
        self.pr_dbg('%s' % unfound)
        return 1
    # We don't care about case when replica has more fields than orig
    # unfound = set(repl.keys()) - set(orig_found.keys())
    # if len(unfound) > 0:
    #     self.pr_dbg("Replica contains fields that were not in orig")
    #     self.pr_dbg('%s' % unfound)
    #     return 1
    self.pr_dbg("Original matches replica")
    return 0
Verify original is subset of replica
entailment
def start_daemon_thread(target, args=()):
    """starts a daemon thread for a given target function and arguments."""
    th = Thread(target=target, args=args)
    th.daemon = True
    th.start()
    return th
starts a daemon thread for a given target function and arguments.
entailment
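Typical usage of the helper above; because the thread is a daemon, it will not keep the interpreter alive on exit:

    import time

    def poll(interval):
        while True:
            time.sleep(interval)

    th = start_daemon_thread(poll, args=(1,))
    assert th.daemon and th.is_alive()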
def serialize_dict_keys(d, prefix=""):
    """returns all the keys in a dictionary.

    >>> serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } })
    ['a', 'a.b', 'a.b.c', 'a.b.b']
    """
    keys = []
    for k, v in d.items():  # .iteritems() is Python 2 only; .items() works on both
        fqk = '%s%s' % (prefix, k)
        keys.append(fqk)
        if isinstance(v, dict):
            keys.extend(serialize_dict_keys(v, prefix="%s." % fqk))
    return keys
returns all the keys in a dictionary. >>> serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }) ['a', 'a.b', 'a.b.c', 'a.b.b']
entailment
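One handy application of serialize_dict_keys is diffing two nested schemas by their flattened key paths:

    old = set(serialize_dict_keys({'a': {'b': 1}}))
    new = set(serialize_dict_keys({'a': {'b': 1, 'c': 2}}))
    assert new - old == {'a.c'}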
def set_user(self, user):
    """
    Writes user data to session.

    Args:
        user: User object
    """
    self.session['user_id'] = user.key
    self.session['user_data'] = user.clean_value()
    role = self.get_role()
    # TODO: this should be remembered from previous login
    # self.session['role_data'] = default_role.clean_value()
    self.session['role_id'] = role.key
    self.current.role_id = role.key
    self.current.user_id = user.key
    # self.perm_cache = PermissionCache(role.key)
    self.session['permissions'] = role.get_permissions()
Writes user data to session. Args: user: User object
entailment
def contains_opposite_color_piece(self, square, position):
    """
    Finds if square on the board is occupied by a ``Piece``
    belonging to the opponent.

    :type: square: Location
    :type: position: Board
    :rtype: bool
    """
    return not position.is_square_empty(square) and \
        position.piece_at_square(square).color != self.color
Finds if square on the board is occupied by a ``Piece`` belonging to the opponent. :type: square: Location :type: position: Board :rtype: bool
entailment
def gettext(message, domain=DEFAULT_DOMAIN):
    """Mark a message as translateable, and translate it.

    All messages in the application that are translateable should be wrapped with
    this function. When importing this function, it should be renamed to '_'.
    For example:

    .. code-block:: python

        from zengine.lib.translation import gettext as _
        print(_('Hello, world!'))
        'Merhaba, dünya!'

    For the messages that will be formatted later on, instead of using the
    position-based formatting, key-based formatting should be used. This gives
    the translator an idea what the variables in the format are going to be,
    and makes it possible for the translator to reorder the variables. For example:

    .. code-block:: python

        name, number = 'Elizabeth', 'II'
        _('Queen %(name)s %(number)s') % {'name': name, 'number': number}
        'Kraliçe II. Elizabeth'

    The message returned by this function depends on the language of the current
    user. If this function is called before a language is installed (which is
    normally done by ZEngine when the user connects), this function will simply
    return the message without modification.

    If there are messages containing unicode characters, in Python 2 these messages
    must be marked as unicode. Otherwise, python will not be able to correctly
    match these messages with translations. For example:

    .. code-block:: python

        print(_('Café'))
        'Café'
        print(_(u'Café'))
        'Kahve'

    Args:
        message (basestring, unicode): The input message.
        domain (basestring): The domain of the message. Defaults to 'messages',
            which is the domain where all application messages should be located.

    Returns:
        unicode: The translated message.
    """
    if six.PY2:
        return InstalledLocale._active_catalogs[domain].ugettext(message)
    else:
        return InstalledLocale._active_catalogs[domain].gettext(message)
Mark a message as translateable, and translate it. All messages in the application that are translateable should be wrapped with this function. When importing this function, it should be renamed to '_'. For example: .. code-block:: python from zengine.lib.translation import gettext as _ print(_('Hello, world!')) 'Merhaba, dünya!' For the messages that will be formatted later on, instead of using the position-based formatting, key-based formatting should be used. This gives the translator an idea what the variables in the format are going to be, and makes it possible for the translator to reorder the variables. For example: .. code-block:: python name, number = 'Elizabeth', 'II' _('Queen %(name)s %(number)s') % {'name': name, 'number': number} 'Kraliçe II. Elizabeth' The message returned by this function depends on the language of the current user. If this function is called before a language is installed (which is normally done by ZEngine when the user connects), this function will simply return the message without modification. If there are messages containing unicode characters, in Python 2 these messages must be marked as unicode. Otherwise, python will not be able to correctly match these messages with translations. For example: .. code-block:: python print(_('Café')) 'Café' print(_(u'Café')) 'Kahve' Args: message (basestring, unicode): The input message. domain (basestring): The domain of the message. Defaults to 'messages', which is the domain where all application messages should be located. Returns: unicode: The translated message.
entailment
def gettext_lazy(message, domain=DEFAULT_DOMAIN):
    """Mark a message as translatable, but delay the translation until the message is used.

    Sometimes, there are some messages that need to be translated, but the
    translation can't be done at the point the message itself is written. For
    example, the names of the fields in a Model can't be translated at the point
    they are written, otherwise the translation would be done when the file is
    imported, long before a user even connects. To avoid this, `gettext_lazy`
    should be used. For example:

    .. code-block:: python

        from zengine.lib.translation import gettext_lazy, InstalledLocale
        from pyoko import model, fields
        class User(model.Model):
            name = fields.String(gettext_lazy('User Name'))
        print(User.name.title)
        'User Name'
        InstalledLocale.install_language('tr')
        print(User.name.title)
        'Kullanıcı Adı'

    Args:
        message (basestring, unicode): The input message.
        domain (basestring): The domain of the message. Defaults to 'messages',
            which is the domain where all application messages should be located.

    Returns:
        unicode: The translated message, with the translation itself being
            delayed until the text is actually used.
    """
    return LazyProxy(gettext, message, domain=domain, enable_cache=False)
Mark a message as translatable, but delay the translation until the message is used. Sometimes, there are some messages that need to be translated, but the translation can't be done at the point the message itself is written. For example, the names of the fields in a Model can't be translated at the point they are written, otherwise the translation would be done when the file is imported, long before a user even connects. To avoid this, `gettext_lazy` should be used. For example: .. code-block:: python from zengine.lib.translation import gettext_lazy, InstalledLocale from pyoko import model, fields class User(model.Model): name = fields.String(gettext_lazy('User Name')) print(User.name.title) 'User Name' InstalledLocale.install_language('tr') print(User.name.title) 'Kullanıcı Adı' Args: message (basestring, unicode): The input message. domain (basestring): The domain of the message. Defaults to 'messages', which is the domain where all application messages should be located. Returns: unicode: The translated message, with the translation itself being delayed until the text is actually used.
entailment
def ngettext(singular, plural, n, domain=DEFAULT_DOMAIN):
    """Mark a message as translateable, and translate it considering plural forms.

    Some messages may need to change based on a number. For example, consider
    a message like the following:

    .. code-block:: python

        def alert_msg(msg_count):
            print('You have %d %s' % (msg_count,
                                      'message' if msg_count == 1 else 'messages'))
        alert_msg(1)
        'You have 1 message'
        alert_msg(5)
        'You have 5 messages'

    To translate this message, you can use ngettext to consider the plural forms:

    .. code-block:: python

        from zengine.lib.translation import ngettext
        def alert_msg(msg_count):
            print(ngettext('You have %(count)d message',
                           'You have %(count)d messages',
                           msg_count) % {'count': msg_count})
        alert_msg(1)
        '1 mesajınız var'
        alert_msg(5)
        '5 mesajlarınız var'

    When doing formatting, both singular and plural forms of the message
    should have the exactly same variables.

    Args:
        singular (unicode): The singular form of the message.
        plural (unicode): The plural form of the message.
        n (int): The number that is used to decide which form should be used.
        domain (basestring): The domain of the message. Defaults to 'messages',
            which is the domain where all application messages should be located.

    Returns:
        unicode: The correct pluralization, translated.
    """
    if six.PY2:
        return InstalledLocale._active_catalogs[domain].ungettext(singular, plural, n)
    else:
        return InstalledLocale._active_catalogs[domain].ngettext(singular, plural, n)
Mark a message as translateable, and translate it considering plural forms. Some messages may need to change based on a number. For example, consider a message like the following: .. code-block:: python def alert_msg(msg_count): print( 'You have %d %s' % (msg_count, 'message' if msg_count == 1 else 'messages')) alert_msg(1) 'You have 1 message' alert_msg(5) 'You have 5 messages' To translate this message, you can use ngettext to consider the plural forms: .. code-block:: python from zengine.lib.translation import ngettext def alert_msg(msg_count): print(ngettext('You have %(count)d message', 'You have %(count)d messages', msg_count) % {'count': msg_count}) alert_msg(1) '1 mesajınız var' alert_msg(5) '5 mesajlarınız var' When doing formatting, both singular and plural forms of the message should have the exactly same variables. Args: singular (unicode): The singular form of the message. plural (unicode): The plural form of the message. n (int): The number that is used to decide which form should be used. domain (basestring): The domain of the message. Defaults to 'messages', which is the domain where all application messages should be located. Returns: unicode: The correct pluralization, translated.
entailment
def ngettext_lazy(singular, plural, n, domain=DEFAULT_DOMAIN):
    """Mark a message with plural forms translatable, and delay the translation
    until the message is used.

    Works the same way as `ngettext`, with delaying behaviour similar to `gettext_lazy`.

    Args:
        singular (unicode): The singular form of the message.
        plural (unicode): The plural form of the message.
        n (int): The number that is used to decide which form should be used.
        domain (basestring): The domain of the message. Defaults to 'messages', which
            is the domain where all application messages should be located.

    Returns:
        unicode: The correct pluralization, with the translation being delayed until
            the message is used.
    """
    return LazyProxy(ngettext, singular, plural, n, domain=domain, enable_cache=False)
Mark a message with plural forms translatable, and delay the translation until the message is used. Works the same way as `ngettext`, with delaying behaviour similar to `gettext_lazy`. Args: singular (unicode): The singular form of the message. plural (unicode): The plural form of the message. n (int): The number that is used to decide which form should be used. domain (basestring): The domain of the message. Defaults to 'messages', which is the domain where all application messages should be located. Returns: unicode: The correct pluralization, with the translation being delayed until the message is used.
entailment
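A minimal usage sketch of the delaying behaviour, assuming 'tr' catalogs are installed for the application: because `ngettext_lazy` returns a `LazyProxy`, the catalog lookup is deferred until the proxy is actually used, so a later `install_language` call still affects the result.

.. code-block:: python

    from zengine.lib.translation import ngettext_lazy, InstalledLocale

    msg = ngettext_lazy('%(n)d message', '%(n)d messages', 2)
    InstalledLocale.install_language('tr')
    # The translation is resolved only here, using the catalogs active at use time
    print(msg % {'n': 2})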
def _wrap_locale_formatter(fn, locale_type):
    """Wrap a Babel data formatting function to automatically format
    for the currently installed locale."""
    def wrapped_locale_formatter(*args, **kwargs):
        """A Babel formatting function, wrapped to automatically use the
        currently installed language.

        The wrapped function will not throw any exceptions for unknown locales;
        if Babel doesn't recognise the locale, we simply fall back to
        the default language.

        The locale used by the wrapped function can be overridden by passing
        it a `locale` keyword.
        To learn more about this function, check the documentation of Babel
        for the function of the same name.
        """
        # Get the current locale from the class
        kwargs_ = {'locale': getattr(InstalledLocale, locale_type)}
        # By creating a dict then updating it, we allow locale to be overridden
        kwargs_.update(kwargs)
        try:
            formatted = fn(*args, **kwargs_)
        except UnknownLocaleError:
            log.warning("Can't do formatting for language code %s, falling back to default %s",
                        kwargs_['locale'], settings.DEFAULT_LANG)
            kwargs_['locale'] = settings.DEFAULT_LANG
            formatted = fn(*args, **kwargs_)
        return formatted
    return wrapped_locale_formatter
Wrap a Babel data formatting function to automatically format for the currently installed locale.
entailment
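A sketch of how the wrapper might be applied to one of Babel's formatters. The `locale_type` value passed here is a hypothetical attribute name; it must match a locale attribute that `InstalledLocale` actually exposes.

.. code-block:: python

    from babel.numbers import format_decimal

    # 'number_format_locale' is an assumption, not a documented attribute name
    format_decimal_localized = _wrap_locale_formatter(format_decimal, 'number_format_locale')

    format_decimal_localized(1234.5)                # formatted for the installed locale
    format_decimal_localized(1234.5, locale='de')   # explicit override: '1.234,5'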
def install_language(cls, language_code):
    """Install the translations for the language specified by `language_code`.

    If we don't have translations for this language, then the default language
    will be used.

    If the language specified is already installed, then this is a no-op.
    """
    # Skip if the language is already installed
    if language_code == cls.language:
        return
    try:
        cls._active_catalogs = cls._translation_catalogs[language_code]
        cls.language = language_code
        log.debug('Installed language %s', language_code)
    except KeyError:
        default = settings.DEFAULT_LANG
        log.warning('Unknown language %s, falling back to %s', language_code, default)
        cls._active_catalogs = cls._translation_catalogs[default]
        cls.language = default
Install the translations for the language specified by `language_code`. If we don't have translations for this language, then the default language will be used. If the language specified is already installed, then this is a no-op.
entailment
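A short usage sketch of the fallback behaviour; the language codes are illustrative only.

.. code-block:: python

    InstalledLocale.install_language('tr')  # activates the 'tr' catalogs
    InstalledLocale.install_language('tr')  # no-op, already installed
    InstalledLocale.install_language('zz')  # unknown code: logs a warning and
                                            # falls back to settings.DEFAULT_LANG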
def install_locale(cls, locale_code, locale_type):
    """Install the locale specified by `locale_code`, for localizations of type `locale_type`.

    If we can't perform localized formatting for the specified locale, then the
    default localization format will be used.

    If the locale specified is already installed for the selected type, then this is a no-op.
    """
    # Skip if the locale is already installed
    if locale_code == getattr(cls, locale_type):
        return
    try:
        # We create a Locale instance to see if the locale code is supported
        locale = Locale(locale_code)
        log.debug('Installed locale %s', locale_code)
    except UnknownLocaleError:
        default = settings.DEFAULT_LOCALIZATION_FORMAT
        log.warning('Unknown locale %s, falling back to %s', locale_code, default)
        locale = Locale(default)
    setattr(cls, locale_type, locale.language)
Install the locale specified by `locale_code`, for localizations of type `locale_type`. If we can't perform localized formatting for the specified locale, then the default localization format will be used. If the locale specified is already installed for the selected type, then this is a no-op.
entailment
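A usage sketch; `'datetime_format_locale'` is a hypothetical attribute name, since `locale_type` must name an attribute that `InstalledLocale` actually defines.

.. code-block:: python

    InstalledLocale.install_locale('de', locale_type='datetime_format_locale')
    InstalledLocale.install_locale('xx', locale_type='datetime_format_locale')
    # 'xx' is unknown, so settings.DEFAULT_LOCALIZATION_FORMAT is used instead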
def _rotate_vector(x, y, x2, y2, x1, y1):
    """
    rotate the (x, y) vector into the coordinate frame of the segment
    (x1, y1) -> (x2, y2), i.e. by the negative of the segment's angle
    """
    angle = atan2(y2 - y1, x2 - x1)
    cos_rad = cos(angle)
    sin_rad = sin(angle)
    return cos_rad * x + sin_rad * y, -sin_rad * x + cos_rad * y
rotate the (x, y) vector into the coordinate frame of the segment (x1, y1) -> (x2, y2), i.e. by the negative of the segment's angle
entailment
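A worked example: the segment (0, 0) -> (0, 1) makes a 90° angle with the x-axis (atan2(1, 0) = pi/2), so the unit x-vector is mapped to (0, -1), up to floating-point error.

.. code-block:: python

    # Argument order is (x, y, x2, y2, x1, y1)
    x, y = _rotate_vector(1.0, 0.0, 0.0, 1.0, 0.0, 0.0)
    # cos(pi/2) ~ 0, sin(pi/2) = 1, so (1, 0) -> (~0.0, -1.0)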
def _build_static_table_mapping():
    """
    Build the static table mapping from header name to a tuple with the following structure:
    (<minimal index of the header>, <mapping from header value to its index>).

    static_table_mapping is used for hash-based searching.
    """
    static_table_mapping = {}
    for index, (name, value) in enumerate(CocaineHeaders.STATIC_TABLE, 1):
        header_name_search_result = static_table_mapping.setdefault(name, (index, {}))
        header_name_search_result[1][value] = index
    return static_table_mapping
Build the static table mapping from header name to a tuple with the following structure: (<minimal index of the header>, <mapping from header value to its index>). static_table_mapping is used for hash-based searching.
entailment
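A shape sketch of the produced mapping, assuming `CocaineHeaders.STATIC_TABLE` mirrors the HPACK static table, where entries 2 and 3 are (':method', 'GET') and (':method', 'POST'); the exact indices depend on the real table contents.

.. code-block:: python

    mapping = _build_static_table_mapping()
    mapping[':method']
    # -> (2, {'GET': 2, 'POST': 3})
    #    ^ minimal index usable for a name-only match,
    #      plus value -> exact-index lookup for perfect matches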
def get_by_index(self, index):
    """
    Returns the entry specified by index

    Note that the table is 1-based, i.e. an index of 0 is invalid. This is
    because a zero index signals that a completely unindexed header follows.

    The entry will either be from the static table or the dynamic table
    depending on the value of index.
    """
    index -= 1
    if 0 <= index < len(CocaineHeaders.STATIC_TABLE):
        return CocaineHeaders.STATIC_TABLE[index]

    index -= len(CocaineHeaders.STATIC_TABLE)
    if 0 <= index < len(self.dynamic_entries):
        return self.dynamic_entries[index]

    raise InvalidTableIndex("Invalid table index %d" % index)
Returns the entry specified by index Note that the table is 1-based, i.e. an index of 0 is invalid. This is because a zero index signals that a completely unindexed header follows. The entry will either be from the static table or the dynamic table depending on the value of index.
entailment
def add(self, name, value): """ Adds a new entry to the table We reduce the table size if the entry will make the table size greater than maxsize. """ # We just clear the table if the entry is too big size = table_entry_size(name, value) if size > self._maxsize: self.dynamic_entries.clear() self._current_size = 0 # Add new entry if the table actually has a size elif self._maxsize > 0: self.dynamic_entries.appendleft((name, value)) self._current_size += size self._shrink()
Adds a new entry to the table We reduce the table size if the entry will make the table size greater than maxsize.
entailment
def search(self, name, value): """ Searches the table for the entry specified by name and value Returns one of the following: - ``None``, no match at all - ``(index, name, None)`` for partial matches on name only. - ``(index, name, value)`` for perfect matches. """ partial = None header_name_search_result = CocaineHeaders.STATIC_TABLE_MAPPING.get(name) if header_name_search_result: index = header_name_search_result[1].get(value) if index is not None: return index, name, value partial = (header_name_search_result[0], name, None) offset = len(CocaineHeaders.STATIC_TABLE) for (i, (n, v)) in enumerate(self.dynamic_entries): if n == name: if v == value: return i + offset + 1, n, v elif partial is None: partial = (i + offset + 1, n, None) return partial
Searches the table for the entry specified by name and value Returns one of the following: - ``None``, no match at all - ``(index, name, None)`` for partial matches on name only. - ``(index, name, value)`` for perfect matches.
entailment
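An illustrative sketch of the three possible outcomes, again assuming an HPACK-style static table where index 2 is (':method', 'GET'); `table` stands for an instance of the headers-table class.

.. code-block:: python

    table.search(':method', 'GET')     # -> (2, ':method', 'GET')  perfect match
    table.search(':method', 'PATCH')   # -> (2, ':method', None)   name-only match
    table.search('x-custom', 'abc')    # -> None, unless the pair was previously
                                       #    add()-ed to the dynamic table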
def _shrink(self): """ Shrinks the dynamic table to be at or below maxsize """ cursize = self._current_size while cursize > self._maxsize: name, value = self.dynamic_entries.pop() cursize -= table_entry_size(name, value) self._current_size = cursize
Shrinks the dynamic table to be at or below maxsize
entailment
def add(self, name, value): # type: (str, str) -> None """Adds a new value for the given key.""" self._last_key = name if name in self: self._dict[name] = value self._as_list[name].append(value) else: self[name] = value
Adds a new value for the given key.
entailment
def get_all(self): # type: () -> typing.Iterable[typing.Tuple[str, str]] """Returns an iterable of all (name, value) pairs. If a header has multiple values, multiple pairs will be returned with the same name. """ for name, values in six.iteritems(self._as_list): for value in values: yield (name, value)
Returns an iterable of all (name, value) pairs. If a header has multiple values, multiple pairs will be returned with the same name.
entailment
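A usage sketch of the multi-value behaviour; the class name `HTTPHeaders` is an assumption based on the method signatures.

.. code-block:: python

    headers = HTTPHeaders()
    headers.add('Set-Cookie', 'a=1')
    headers.add('Set-Cookie', 'b=2')
    list(headers.get_all())
    # -> [('Set-Cookie', 'a=1'), ('Set-Cookie', 'b=2')]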
def safe_print(ustring, errors='replace', **kwargs): """ Safely print a unicode string """ encoding = sys.stdout.encoding or 'utf-8' if sys.version_info[0] == 3: print(ustring, **kwargs) else: bytestr = ustring.encode(encoding, errors=errors) print(bytestr, **kwargs)
Safely print a unicode string
entailment
def edit_permissions(self):
    """Creates the view used to edit permissions.

    To create the view, data in the following format is passed to the UI
    in the objects field:

    .. code-block:: python

        {
            "type": "tree-toggle",
            "action": "apply_change",
            "trees": [
                {
                    "checked": true,
                    "name": "Workflow 1 Name",
                    "id": "workflow1",
                    "children": [
                        {
                            "checked": true,
                            "name": "Task 1 Name",
                            "id": "workflow1..task1",
                            "children": []
                        },
                        {
                            "checked": false,
                            "id": "workflow1..task2",
                            "name": "Task 2 Name",
                            "children": []
                        }
                    ]
                },
                {
                    "checked": true,
                    "name": "Workflow 2 Name",
                    "id": "workflow2",
                    "children": [
                        {
                            "checked": true,
                            "name": "Workflow 2 Lane 1 Name",
                            "id": "workflow2.lane1",
                            "children": [
                                {
                                    "checked": true,
                                    "name": "Workflow 2 Task 1 Name",
                                    "id": "workflow2.lane1.task1",
                                    "children": []
                                },
                                {
                                    "checked": false,
                                    "name": "Workflow 2 Task 2 Name",
                                    "id": "workflow2.lane1.task2",
                                    "children": []
                                }
                            ]
                        }
                    ]
                }
            ]
        }

    "type" field denotes that the object is a tree view which has elements
    that can be toggled. "action" field is the name of the backend action
    that is triggered when an element is toggled. "name" field is the
    human-readable name. "id" field is used to make requests to the backend.
    "checked" field shows whether the role has the permission or not.
    "children" field is the sub-permissions of the permission.
    """
    # Get the role that was selected in the CRUD view
    key = self.current.input['object_id']
    self.current.task_data['role_id'] = key
    role = RoleModel.objects.get(key=key)
    # Get the cached permission tree, or build a new one if there is none cached
    # TODO: Add an extra view in case there was no cache, as in 'please wait calculating permissions'
    permission_tree = self._permission_trees(PermissionModel.objects)
    # Apply the selected role to the permission tree, setting the 'checked' field
    # of the permissions the role has
    role_tree = self._apply_role_tree(permission_tree, role)
    # Apply final formatting, and output the tree to the UI
    self.output['objects'] = [
        {
            'type': 'tree-toggle',
            'action': 'apply_change',
            'trees': self._format_tree_output(role_tree),
        },
    ]
    self.form_out(PermissionForm())
Creates the view used to edit permissions. To create the view, data in the following format is passed to the UI in the objects field: .. code-block:: python { "type": "tree-toggle", "action": "apply_change", "trees": [ { "checked": true, "name": "Workflow 1 Name", "id": "workflow1", "children": [ { "checked": true, "name": "Task 1 Name", "id": "workflow1..task1", "children": [] }, { "checked": false, "id": "workflow1..task2", "name": "Task 2 Name", "children": [] } ] }, { "checked": true, "name": "Workflow 2 Name", "id": "workflow2", "children": [ { "checked": true, "name": "Workflow 2 Lane 1 Name", "id": "workflow2.lane1", "children": [ { "checked": true, "name": "Workflow 2 Task 1 Name", "id": "workflow2.lane1.task1", "children": [] }, { "checked": false, "name": "Workflow 2 Task 2 Name", "id": "workflow2.lane1.task2", "children": [] } ] } ] } ] } "type" field denotes that the object is a tree view which has elements that can be toggled. "action" field is the name of the backend action that is triggered when an element is toggled. "name" field is the human-readable name. "id" field is used to make requests to the backend. "checked" field shows whether the role has the permission or not. "children" field is the sub-permissions of the permission.
entailment
def _permission_trees(permissions): """Get the cached permission tree, or build a new one if necessary.""" treecache = PermissionTreeCache() cached = treecache.get() if not cached: tree = PermissionTreeBuilder() for permission in permissions: tree.insert(permission) result = tree.serialize() treecache.set(result) return result return cached
Get the cached permission tree, or build a new one if necessary.
entailment
def _apply_role_tree(self, perm_tree, role): """In permission tree, sets `'checked': True` for the permissions that the role has.""" role_permissions = role.get_permissions() for perm in role_permissions: self._traverse_tree(perm_tree, perm)['checked'] = True return perm_tree
In permission tree, sets `'checked': True` for the permissions that the role has.
entailment
def _traverse_tree(tree, path):
    """Traverses the permission tree, returning the permission at the given permission path."""
    path_steps = (step for step in path.split('.') if step != '')
    # Special handling for the first step, because the first step is at the top
    # level of the tree rather than under a 'children' key
    first_step = next(path_steps)
    subtree = tree[first_step]
    for step in path_steps:
        subtree = subtree['children'][step]
    return subtree
Traverses the permission tree, returning the permission at the given permission path.
entailment
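A worked example of the traversal: only the first step is looked up at the top level; every later step goes through the 'children' key.

.. code-block:: python

    tree = {
        'workflow2': {
            'checked': False,
            'children': {
                'lane1': {
                    'checked': False,
                    'children': {
                        'task1': {'checked': False, 'children': {}},
                    },
                },
            },
        },
    }
    _traverse_tree(tree, 'workflow2.lane1.task1')
    # -> {'checked': False, 'children': {}}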
def _format_subtree(self, subtree): """Recursively format all subtrees.""" subtree['children'] = list(subtree['children'].values()) for child in subtree['children']: self._format_subtree(child) return subtree
Recursively format all subtrees.
entailment
def apply_change(self):
    """Applies changes to the permissions of the role.

    To make a change to the permissions of the role, a request in the
    following format should be sent:

    .. code-block:: python

        {
            'change': [
                {
                    'id': 'workflow2.lane1.task1',
                    'checked': false
                }
            ],
        }

    The 'id' field of a change is the id of the tree element that was sent
    to the UI (see `Permissions.edit_permissions`). The 'checked' field is
    the new state of the element. Multiple changes can be sent in a single
    request.
    """
    changes = self.input['change']
    key = self.current.task_data['role_id']
    role = RoleModel.objects.get(key=key)
    for change in changes:
        permission = PermissionModel.objects.get(code=change['id'])
        if change['checked'] is True:
            role.add_permission(permission)
        else:
            role.remove_permission(permission)
    role.save()
Applies changes to the permissions of the role. To make a change to the permissions of the role, a request in the following format should be sent: .. code-block:: python { 'change': [ { 'id': 'workflow2.lane1.task1', 'checked': false } ], } The 'id' field of a change is the id of the tree element that was sent to the UI (see `Permissions.edit_permissions`). The 'checked' field is the new state of the element. Multiple changes can be sent in a single request.
entailment
def seek(self, offset):
    """
    shifts to the record with the given number in the original file

    :param offset: record number to seek to
    """
    if self._shifts:
        if 0 <= offset < len(self._shifts):
            current_pos = self._file.tell()
            new_pos = self._shifts[offset]
            if current_pos != new_pos:
                if current_pos == self._shifts[-1]:  # reached the end of the file
                    self._data = self.__reader()
                    self.__file = iter(self._file.readline, '')
                self._file.seek(new_pos)
        else:
            raise IndexError('invalid offset')
    else:
        raise self._implement_error
shifts to the record with the given number in the original file :param offset: record number to seek to
entailment
def tell(self): """ :return: number of records processed from the original file """ if self._shifts: t = self._file.tell() return bisect_left(self._shifts, t) raise self._implement_error
:return: number of records processed from the original file
entailment
def write(self, data): """ write single molecule into file """ m = self._convert_structure(data) self._file.write(self._format_mol(*m)) self._file.write('M END\n') for k, v in data.meta.items(): self._file.write(f'> <{k}>\n{v}\n') self._file.write('$$$$\n')
write single molecule into file
entailment
def save_workflow_to_cache(self, serialized_wf_instance):
    """
    If we haven't come to the end of the wf, saves the wf state and task_data to cache.

    Task_data items that start with underscore "_" are treated as local and
    are not passed to subsequent task steps.
    """
    # self.current.task_data['flow'] = None
    task_data = self.current.task_data.copy()
    for k, v in list(task_data.items()):
        if k.startswith('_'):
            del task_data[k]
    if 'cmd' in task_data:
        del task_data['cmd']
    self.wf_state.update({'step': serialized_wf_instance,
                          'data': task_data,
                          'name': self.current.workflow_name,
                          'wf_id': self.workflow_spec.wf_id
                          })
    if self.current.lane_id:
        self.current.pool[self.current.lane_id] = self.current.role.key
    self.wf_state['pool'] = self.current.pool
    self.current.log.debug("POOL Content before WF Save: %s" % self.current.pool)
    self.current.wf_cache.save(self.wf_state)
If we haven't come to the end of the wf, saves the wf state and task_data to cache. Task_data items that start with underscore "_" are treated as local and are not passed to subsequent task steps.
entailment
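A minimal sketch of the filtering that happens before the state is cached; the dict comprehension below is equivalent to the deletion loop in the method.

.. code-block:: python

    task_data = {'user_id': 42, '_draft': 'local only', 'cmd': 'next'}
    persisted = {k: v for k, v in task_data.items()
                 if not k.startswith('_') and k != 'cmd'}
    persisted  # -> {'user_id': 42}, which is what ends up in wf_state['data']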
def get_pool_context(self):  # TODO: Add in-process caching
    """
    Builds context for the WF pool.

    Returns:
        Context dict.
    """
    context = {self.current.lane_id: self.current.role, 'self': self.current.role}
    for lane_id, role_id in self.current.pool.items():
        if role_id:
            # Bind role_id as a default argument so each proxy captures the value
            # from its own loop iteration instead of the loop's final value.
            context[lane_id] = lazy_object_proxy.Proxy(
                lambda role_id=role_id: self.role_model(super_context).objects.get(role_id))
    return context
Builds context for the WF pool. Returns: Context dict.
entailment
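The default-argument binding in the lambda above matters because Python closures capture variables, not values; since the proxies are evaluated lazily, without it every proxy would resolve the last `role_id` of the loop. A minimal demonstration of the pitfall:

.. code-block:: python

    late = [lambda: i for i in range(3)]
    [f() for f in late]    # -> [2, 2, 2]: every closure sees the final i
    bound = [lambda i=i: i for i in range(3)]
    [f() for f in bound]   # -> [0, 1, 2]: each default is captured per iteration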
def load_workflow_from_cache(self):
    """
    Loads the serialized wf state and data from cache and updates
    self.current.task_data.
    """
    if not self.current.new_token:
        self.wf_state = self.current.wf_cache.get(self.wf_state)
        self.current.task_data = self.wf_state['data']
        self.current.set_client_cmds()
        self.current.pool = self.wf_state['pool']
        return self.wf_state['step']
Loads the serialized wf state and data from cache and updates self.current.task_data.
entailment
def serialize_workflow(self): """ Serializes the current WF. Returns: WF state data. """ self.workflow.refresh_waiting_tasks() return CompactWorkflowSerializer().serialize_workflow(self.workflow, include_spec=False)
Serializes the current WF. Returns: WF state data.
entailment
def load_or_create_workflow(self):
    """
    Tries to load the previously serialized (and saved) workflow.
    Creates a new one if it can't.
    """
    self.workflow_spec = self.get_worfklow_spec()
    return self._load_workflow() or self.create_workflow()
Tries to load the previously serialized (and saved) workflow. Creates a new one if it can't.
entailment
def find_workflow_path(self):
    """
    Tries to find the path of the workflow diagram file
    in `WORKFLOW_PACKAGES_PATHS`.

    Returns:
        Path of the workflow spec file (BPMN diagram)
    """
    for pth in settings.WORKFLOW_PACKAGES_PATHS:
        path = "%s/%s.bpmn" % (pth, self.current.workflow_name)
        if os.path.exists(path):
            return path
    err_msg = "BPMN file cannot be found: %s" % self.current.workflow_name
    log.error(err_msg)
    raise RuntimeError(err_msg)
Tries to find the path of the workflow diagram file in `WORKFLOW_PACKAGES_PATHS`. Returns: Path of the workflow spec file (BPMN diagram)
entailment
def get_worfklow_spec(self):
    """
    Generates and caches the workflow spec package from BPMN diagrams
    that are read from disk.

    Returns:
        SpiffWorkflow Spec object.
    """
    # TODO: convert from in-process to redis based caching
    if self.current.workflow_name not in self.workflow_spec_cache:
        # path = self.find_workflow_path()
        # spec_package = InMemoryPackager.package_in_memory(self.current.workflow_name, path)
        # spec = BpmnSerializer().deserialize_workflow_spec(spec_package)
        try:
            self.current.wf_object = BPMNWorkflow.objects.get(name=self.current.workflow_name)
        except ObjectDoesNotExist:
            self.current.wf_object = BPMNWorkflow.objects.get(name='not_found')
            self.current.task_data['non-existent-wf'] = self.current.workflow_name
            self.current.workflow_name = 'not_found'

        xml_content = self.current.wf_object.xml.body
        spec = ZopsSerializer().deserialize_workflow_spec(xml_content, self.current.workflow_name)

        spec.wf_id = self.current.wf_object.key
        self.workflow_spec_cache[self.current.workflow_name] = spec
    return self.workflow_spec_cache[self.current.workflow_name]
Generates and caches the workflow spec package from BPMN diagrams that are read from disk. Returns: SpiffWorkflow Spec object.
entailment
def _save_or_delete_workflow(self):
    """
    Calls the real save method if we pass the beginning of the wf
    """
    if not self.current.task_type.startswith('Start'):
        if self.current.task_name.startswith('End') and not self.are_we_in_subprocess():
            self.wf_state['finished'] = True
            self.wf_state['finish_date'] = datetime.now().strftime(
                settings.DATETIME_DEFAULT_FORMAT)
            if self.current.workflow_name not in settings.EPHEMERAL_WORKFLOWS and not \
                    self.wf_state['in_external']:
                wfi = WFCache(self.current).get_instance()
                TaskInvitation.objects.filter(instance=wfi, role=self.current.role,
                                              wf_name=wfi.wf.name).delete()
            self.current.log.info("Delete WFCache: %s %s" % (self.current.workflow_name,
                                                             self.current.token))
        self.save_workflow_to_cache(self.serialize_workflow())
Calls the real save method if we pass the beginning of the wf
entailment