text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _eratosthenes():
"""Yields the sequence of prime numbers via the Sieve of Eratosthenes.""" |
d = {} # map each composite integer to its first-found prime factor
for q in count(2): # q gets 2, 3, 4, 5, ... ad infinitum
p = d.pop(q, None)
if p is None:
# q not a key in D, so q is prime, therefore, yield it
yield q
# mark q squared as not-prime (with q as first-found prime factor)
d[q * q] = q
else:
# let x <- smallest (N*p)+q which wasn't yet known to be composite
# we just learned x is composite, with p first-found prime factor,
# since p is the first-found prime factor of q -- find and mark it
x = p + q
while x in d:
x += p
d[x] = p |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def atoms_order(self):
    """Morgan-like algorithm for canonical graph-node ordering.

    Iteratively refines prime-product weights until every atom is unique
    or the partition into equivalence classes stops changing.

    :return: dict of atom-weight pairs; atoms with equal weights are
        topologically equivalent (symmetric)
    """
    if not len(self):  # for empty containers
        return {}
    elif len(self) == 1:  # optimize single atom containers
        return dict.fromkeys(self, 2)

    # Initial invariant of each atom: its own integer value plus the
    # sorted integer values of its incident bonds.
    params = {n: (int(node), tuple(sorted(int(edge) for edge in self._adj[n].values())))
              for n, node in self.atoms()}
    newlevels = {}
    countprime = iter(primes)
    # Assign one prime per distinct invariant, in sorted-invariant order,
    # so equal invariants share the same prime weight.
    weights = {x: newlevels.get(y) or newlevels.setdefault(y, next(countprime))
               for x, y in sorted(params.items(), key=itemgetter(1))}
    tries = len(self) * 4
    numb = len(set(weights.values()))
    stab = 0  # consecutive iterations without refinement
    while tries:
        oldnumb = numb
        neweights = {}
        countprime = iter(primes)
        # weights[n] ** 2 NEED for differentiation of molecules like A-B
        # or any other complete graphs.
        tmp = {n: reduce(mul, (weights[x] for x in m), weights[n] ** 2) for n, m in self._adj.items()}
        weights = {x: (neweights.get(y) or neweights.setdefault(y, next(countprime)))
                   for x, y in sorted(tmp.items(), key=itemgetter(1))}
        numb = len(set(weights.values()))
        if numb == len(self):  # each atom now unique
            break
        elif numb == oldnumb:
            # Partition did not refine; allow a few stable rounds before
            # giving up (classes with >1 member may still split later).
            x = Counter(weights.values())
            if x[min(x)] > 1:
                if stab == 3:
                    break
            elif stab >= 2:
                break
            stab += 1
        elif stab:
            stab = 0
        tries -= 1
        if not tries and numb < oldnumb:
            warning('morgan. number of attempts exceeded. uniqueness has decreased. next attempt will be made')
            tries = 1
    else:
        # while-loop exhausted its budget without breaking out
        warning('morgan. number of attempts exceeded')
    return weights
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_manual(cls, pawn_value, knight_value, bishop_value, rook_value, queen_value, king_value):
    """Build a piece-value table from externally supplied values.

    :param pawn_value: int value of a pawn
    :param knight_value: int value of a knight
    :param bishop_value: int value of a bishop
    :param rook_value: int value of a rook
    :param queen_value: int value of a queen
    :param king_value: int value of a king
    :return: instance of ``cls`` with the ``*_VALUE`` attributes set
    """
    values = cls()
    # Assign every piece constant in one pass so names and arguments
    # cannot drift out of sync.
    (values.PAWN_VALUE, values.KNIGHT_VALUE, values.BISHOP_VALUE,
     values.ROOK_VALUE, values.QUEEN_VALUE, values.KING_VALUE) = (
        pawn_value, knight_value, bishop_value,
        rook_value, queen_value, king_value)
    return values
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def val(self, piece, ref_color):
    """Signed material value of *piece* as seen from *ref_color*.

    :param piece: Piece instance, or None for an empty square
    :param ref_color: Color the valuation is relative to
    :return: int -- positive for own pieces, negative for the opponent's,
        0 for None or an unrecognized piece type
    """
    if piece is None:
        return 0
    # Own pieces count positively, the opponent's negatively.
    sign = 1 if piece.color == ref_color else -1
    if isinstance(piece, Pawn):
        value = self.PAWN_VALUE
    elif isinstance(piece, Queen):
        value = self.QUEEN_VALUE
    elif isinstance(piece, Bishop):
        value = self.BISHOP_VALUE
    elif isinstance(piece, Rook):
        value = self.ROOK_VALUE
    elif isinstance(piece, Knight):
        value = self.KNIGHT_VALUE
    elif isinstance(piece, King):
        value = self.KING_VALUE
    else:
        return 0
    return value * sign
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field_cache(self, cache_type='es'):
    """Return the list of field mappings for the configured index pattern.

    :param cache_type: 'kibana' reads the cached index-pattern document
        from the .kibana index; 'es' (or anything starting with
        'elastic') derives the cache from the live ES field mappings.
    :return: list of field-mapping dicts, [] when the kibana document is
        missing, or None for an unknown cache type.
    """
    if cache_type == 'kibana':
        try:
            raw = urlopen(self.get_url).read().decode('utf-8')
        except HTTPError:
            # No index-pattern document exists yet -> nothing cached.
            return []
        # Response shape: {..., "_source": {"title": ..., "fields": "<json str>"}}
        index_pattern = json.loads(raw)
        return json.loads(index_pattern['_source']['fields'])
    if cache_type == 'es' or cache_type.startswith('elastic'):
        raw = urlopen(self.es_get_url).read().decode('utf-8')
        es_mappings = json.loads(raw)
        # Convert the raw ES mapping tree into the flat .kibana format,
        # skipping the '.kibana' bookkeeping index itself.
        field_cache = []
        for index_name, index_mapping in iteritems(es_mappings):
            if index_name == self.index:
                continue
            field_cache.extend(self.get_index_mappings(index_mapping['mappings']))
        return self.dedup_field_cache(field_cache)
    self.pr_err("Unknown cache type: %s" % cache_type)
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post_field_cache(self, field_cache):
    """Post *field_cache* (a list of field mappings) to the .kibana index.

    :param field_cache: list of .kibana field-mapping dicts
    :return: 0 (the reply is parsed only so a malformed response raises)
    """
    payload = self.field_cache_to_index_pattern(field_cache)
    response_text = requests.post(self.post_url, data=payload).text
    # Parse the reply so an invalid (non-JSON) response raises here
    # instead of being silently ignored.
    json.loads(response_text)
    return 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def field_cache_to_index_pattern(self, field_cache):
    """Serialize *field_cache* into a .kibana index-pattern document.

    :param field_cache: list of field-mapping dicts
    :return: compact JSON string ready to be POSTed to .kibana
    """
    # 'fields' holds the field list as an *escaped* JSON string -- that is
    # how Kibana stores it inside the index-pattern document.
    document = {
        'customFormats': "{}",
        'title': self.index_pattern,
        'fields': json.dumps(field_cache, separators=(',', ':')),
    }
    return json.dumps(document, separators=(',', ':'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_mapping(self, m):
    """Check that mapping *m* carries the minimum set of .kibana fields.

    Only presence/non-emptiness is asserted, not the values themselves.
    A missing 'doc_values' is tolerated for system fields (name starting
    with '_') and is defaulted to False in place.

    :param m: field-mapping dict (may be mutated: doc_values defaulted)
    :return: True when the mapping is usable, False otherwise
    """
    if 'name' not in m:
        self.pr_dbg("Missing %s" % "name")
        return False
    for required in ('analyzed', 'indexed', 'type', 'scripted', 'count'):
        if required not in m or m[required] == "":
            self.pr_dbg("Missing %s" % required)
            self.pr_dbg("Full %s" % m)
            return False
    if 'doc_values' not in m or m['doc_values'] == "":
        # Only system fields may omit doc_values; default it for them.
        if not m['name'].startswith('_'):
            self.pr_dbg("Missing %s" % "doc_values")
            return False
        m['doc_values'] = False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_index_mappings(self, index):
    """Convert every doc_type of *index* into .kibana field mappings.

    :param index: dict of doc_type name -> doc_type mapping
    :return: flat list of field mappings, or None when any doc_type fails
    """
    fields = []
    for doc_type_name, doc_type in iteritems(index):
        converted = self.get_doc_type_mappings(doc_type)
        if converted is None:
            # Propagate the failure instead of returning a partial list.
            return None
        fields.extend(converted)
    return fields
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_doc_type_mappings(self, doc_type):
"""Converts all doc_types' fields to .kibana""" |
doc_fields_arr = []
found_score = False
for (key, val) in iteritems(doc_type):
# self.pr_dbg("\t\tfield: %s" % key)
# self.pr_dbg("\tval: %s" % val)
add_it = False
retdict = {}
# _ are system
if not key.startswith('_'):
if 'mapping' not in doc_type[key]:
self.pr_err("No mapping in doc_type[%s]" % key)
return None
if key in doc_type[key]['mapping']:
subkey_name = key
else:
subkey_name = re.sub('.*\.', '', key)
if subkey_name not in doc_type[key]['mapping']:
self.pr_err(
"Couldn't find subkey " +
"doc_type[%s]['mapping'][%s]" % (key, subkey_name))
return None
# self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
retdict = self.get_field_mappings(
doc_type[key]['mapping'][subkey_name])
add_it = True
# system mappings don't list a type,
# but kibana makes them all strings
if key in self.sys_mappings:
retdict['analyzed'] = False
retdict['indexed'] = False
if key == '_source':
retdict = self.get_field_mappings(
doc_type[key]['mapping'][key])
retdict['type'] = "_source"
elif key == '_score':
retdict['type'] = "number"
elif 'type' not in retdict:
retdict['type'] = "string"
add_it = True
if add_it:
retdict['name'] = key
retdict['count'] = 0 # always init to 0
retdict['scripted'] = False # I haven't observed a True yet
if not self.check_mapping(retdict):
self.pr_err("Error, invalid mapping")
return None
# the fields element is an escaped array of json
# make the array here, after all collected, then escape it
doc_fields_arr.append(retdict)
if not found_score:
doc_fields_arr.append(
{"name": "_score",
"type": "number",
"count": 0,
"scripted": False,
"indexed": False,
"analyzed": False,
"doc_values": False})
return doc_fields_arr |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field_mappings(self, field):
    """Translate one ES field description into .kibana mapping attributes.

    :param field: dict of ES mapping attributes for a single field
    :return: dict of kibana attributes; 'indexed' and 'analyzed' default
        to False and are derived from the ES 'index' attribute
    """
    retdict = {'indexed': False, 'analyzed': False}
    for key, value in iteritems(field):
        if key in self.mappings:
            # Kibana collapses all numeric ES types into "number".
            if key == 'type' and value in ('long', 'integer', 'double', 'float'):
                value = "number"
            retdict[key] = value
        if key == 'index' and value != "no":
            retdict['indexed'] = True
            if value == "analyzed":
                retdict['analyzed'] = True
    return retdict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_kibana_cache_incomplete(self, es_cache, k_cache):
    """Return True when the kibana cache lacks fields that ES reports.

    The kibana cache is assumed correct but possibly stale: only fields
    present in *es_cache* and absent from *k_cache* make it incomplete;
    extra kibana-only fields are fine.

    :param es_cache: list of field mappings derived from ES
    :param k_cache: list of field mappings read from .kibana
    """
    def index_by_name(cache):
        # Key each mapping by its field name and zero out the attributes
        # we deliberately ignore when comparing.
        indexed = {}
        for mapping in cache:
            indexed[mapping['name']] = mapping
            for ignored in self.mappings_ignore:
                indexed[mapping['name']][ignored] = 0
        return indexed

    es_names = set(index_by_name(es_cache))
    kibana_names = set(index_by_name(k_cache))
    # Incomplete iff ES knows at least one field kibana doesn't.
    return not es_names <= kibana_names
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_to_compare_dict(self, list_form):
    """Index a list of field mappings by field name for easy comparison.

    Attributes listed in ``self.mappings_ignore`` are zeroed in place so
    they do not affect later equality checks.

    :param list_form: list of field-mapping dicts
    :return: dict name -> mapping, or None when the list holds two
        *different* mappings under the same name
    """
    compare_dict = {}
    for mapping in list_form:
        name = mapping['name']
        if name in compare_dict:
            self.pr_dbg("List has duplicate field %s:\n%s" %
                        (name, compare_dict[name]))
            if compare_dict[name] != mapping:
                self.pr_dbg("And values are different:\n%s" % mapping)
                return None
        compare_dict[name] = mapping
        for ignored in self.mappings_ignore:
            compare_dict[name][ignored] = 0
    return compare_dict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compare_field_caches(self, replica, original):
    """Verify every field of *original* is present and identical in *replica*.

    :param replica: list of field mappings (candidate superset)
    :param original: list of field mappings (reference subset)
    :return: 0 when original matches replica, 1 on any duplicate,
        missing field, or value mismatch
    """
    if original is None:
        original = []
    if replica is None:
        replica = []
    self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
                (len(original), len(replica)))
    # Index both lists by field name (this also zeroes ignored attributes).
    orig = self.list_to_compare_dict(original)
    if orig is None:
        self.pr_dbg("Original has duplicate fields")
        return 1
    repl = self.list_to_compare_dict(replica)
    if repl is None:
        self.pr_dbg("Replica has duplicate fields")
        return 1
    # Every replica field must exist in orig and match it exactly.
    for field_name, field in repl.items():
        if field_name not in orig:
            self.pr_dbg("Replica has field not found in orig %s: %s" %
                        (field_name, field))
            return 1
        if orig[field_name] != field:
            self.pr_dbg("Field in replica doesn't match orig:")
            self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
            return 1
    # BUGFIX: the old check subtracted names *collected from repl* from
    # repl's own keys and therefore could never fire; compare orig's keys
    # against repl's keys to detect fields missing from the replica.
    unfound = set(orig.keys()) - set(repl.keys())
    if len(unfound) > 0:
        self.pr_dbg("Orig contains fields that were not in replica")
        self.pr_dbg('%s' % unfound)
        return 1
    self.pr_dbg("Original matches replica")
    return 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_daemon_thread(target, args=()):
    """Start *target* in a daemon thread and return the Thread object.

    Daemon threads do not keep the interpreter alive on shutdown.

    :param target: callable to run in the thread
    :param args: positional arguments passed to *target*
    :return: the started ``threading.Thread``
    """
    worker = Thread(target=target, args=args, daemon=True)
    worker.start()
    return worker
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_dict_keys(d, prefix=""):
    """Return all keys of nested dict *d* as dotted paths.

    Example: {'a': {'b': {'c': 1}}} -> ['a', 'a.b', 'a.b.c']

    :param d: (possibly nested) dictionary
    :param prefix: dotted prefix carried through the recursion
    :return: list of fully-qualified key strings
    """
    keys = []
    # BUGFIX: d.items() replaces the Python-2-only d.iteritems(); the
    # surrounding codebase already requires Python 3 (it uses f-strings).
    for key, value in d.items():
        fq_key = '%s%s' % (prefix, key)
        keys.append(fq_key)
        if isinstance(value, dict):
            keys.extend(serialize_dict_keys(value, prefix="%s." % fq_key))
    return keys
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_user(self, user):
    """Write the authenticated *user* (and their role) into the session.

    Stores the user's key/data and the role's key/permissions so later
    requests can authorize without re-reading the user record.

    Args:
        user: User object
    """
    self.session['user_id'] = user.key
    self.session['user_data'] = user.clean_value()
    role = self.get_role()
    # TODO: this should be remembered from previous login
    # self.session['role_data'] = default_role.clean_value()
    self.session['role_id'] = role.key
    # Mirror the ids on `current` for direct access within this request.
    self.current.role_id = role.key
    self.current.user_id = user.key
    # self.perm_cache = PermissionCache(role.key)
    self.session['permissions'] = role.get_permissions()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contains_opposite_color_piece(self, square, position):
    """Tell whether *square* holds a piece belonging to the opponent.

    :param square: Location to inspect
    :param position: Board being examined
    :return: True iff the square is occupied by a piece of the other color
    """
    if position.is_square_empty(square):
        return False
    return position.piece_at_square(square).color != self.color
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gettext(message, domain=DEFAULT_DOMAIN):
    """Mark *message* as translatable and translate it immediately.

    Import as ``_`` and wrap all user-facing strings.  Prefer key-based
    formatting (``'%(name)s' % {'name': name}``) so translators can
    reorder variables.  Before a language is installed the default
    catalog returns the message unchanged.

    Args:
        message (basestring, unicode): The input message.
        domain (basestring): Message domain, defaults to 'messages'.

    Returns:
        unicode: The translated message.
    """
    catalog = InstalledLocale._active_catalogs[domain]
    # Python 2 needs the unicode-aware variant of gettext.
    return catalog.ugettext(message) if six.PY2 else catalog.gettext(message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gettext_lazy(message, domain=DEFAULT_DOMAIN):
    """Mark *message* as translatable but delay translation until use.

    Use this for strings evaluated at import time (e.g. model field
    titles), where the user's language is not yet known: the returned
    proxy performs the lookup each time it is actually rendered.

    Args:
        message (basestring, unicode): The input message.
        domain (basestring): Message domain, defaults to 'messages'.

    Returns:
        unicode: Lazy proxy that translates when accessed.
    """
    return LazyProxy(gettext, message, domain=domain, enable_cache=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ngettext(singular, plural, n, domain=DEFAULT_DOMAIN):
    """Translate a message, selecting the singular or plural form by *n*.

    Both forms should use the same named format variables, e.g.::

        ngettext('You have %(count)d message',
                 'You have %(count)d messages', count) % {'count': count}

    Args:
        singular (unicode): Singular form of the message.
        plural (unicode): Plural form of the message.
        n (int): Number deciding which form applies.
        domain (basestring): Message domain, defaults to 'messages'.

    Returns:
        unicode: The correct pluralization, translated.
    """
    catalog = InstalledLocale._active_catalogs[domain]
    # Python 2 needs the unicode-aware variant of ngettext.
    if six.PY2:
        return catalog.ungettext(singular, plural, n)
    return catalog.ngettext(singular, plural, n)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ngettext_lazy(singular, plural, n, domain=DEFAULT_DOMAIN):
    """Like `ngettext`, but delay the translation until the message is used.

    Combines the plural handling of `ngettext` with the deferred
    evaluation of `gettext_lazy`.

    Args:
        singular (unicode): The singular form of the message.
        plural (unicode): The plural form of the message.
        n (int): The number that decides which form is used.
        domain (basestring): Message domain, defaults to 'messages'.

    Returns:
        unicode: The correct pluralization, translated when actually used.
    """
    return LazyProxy(ngettext, singular, plural, n, domain=domain, enable_cache=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def install_language(cls, language_code):
    """Install the translation catalogs for *language_code*.

    Falls back to ``settings.DEFAULT_LANG`` for languages we have no
    translations for.  No-op when the language is already installed.
    """
    if language_code == cls.language:
        return  # already installed
    try:
        catalogs = cls._translation_catalogs[language_code]
    except KeyError:
        default = settings.DEFAULT_LANG
        log.warning('Unknown language %s, falling back to %s', language_code, default)
        cls._active_catalogs = cls._translation_catalogs[default]
        cls.language = default
    else:
        cls._active_catalogs = catalogs
        cls.language = language_code
        log.debug('Installed language %s', language_code)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def install_locale(cls, locale_code, locale_type):
    """Install the locale used for *locale_type* localized formatting.

    Falls back to ``settings.DEFAULT_LOCALIZATION_FORMAT`` when the code
    is unknown to Babel.  No-op when the locale is already installed.
    """
    # Skip if the locale is already installed
    if locale_code == getattr(cls, locale_type):
        return
    try:
        # Creating a Locale instance validates that the code is supported
        locale = Locale(locale_code)
        log.debug('Installed locale %s', locale_code)
    except UnknownLocaleError:
        default = settings.DEFAULT_LOCALIZATION_FORMAT
        log.warning('Unknown locale %s, falling back to %s', locale_code, default)
        locale = Locale(default)
    # NOTE(review): only `locale.language` is stored (e.g. 'en' for
    # 'en_US'), so the early-out above never matches a full code like
    # 'en_US' and any territory-specific format is dropped -- confirm
    # this is intended.
    setattr(cls, locale_type, locale.language)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rotate_vector(x, y, x2, y2, x1, y1):
""" rotate x,y vector over x2-x1, y2-y1 angle """ |
angle = atan2(y2 - y1, x2 - x1)
cos_rad = cos(angle)
sin_rad = sin(angle)
return cos_rad * x + sin_rad * y, -sin_rad * x + cos_rad * y |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_by_index(self, index):
    """Return the header-table entry for 1-based *index*.

    Index 0 is invalid by construction (it marks a completely unindexed
    header on the wire).  Indices first cover the static table, then the
    dynamic entries.

    :raises InvalidTableIndex: when the index falls outside both tables
    """
    cursor = index - 1  # the static table is 0-based
    static_table = CocaineHeaders.STATIC_TABLE
    if 0 <= cursor < len(static_table):
        return static_table[cursor]
    cursor -= len(static_table)
    if 0 <= cursor < len(self.dynamic_entries):
        return self.dynamic_entries[cursor]
    raise InvalidTableIndex("Invalid table index %d" % cursor)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, name, value):
    """Add the (*name*, *value*) entry to the dynamic table.

    An entry larger than the entire table budget simply clears the table
    (it could never fit); otherwise it is prepended and the table is
    shrunk back under maxsize.
    """
    entry_size = table_entry_size(name, value)
    if entry_size > self._maxsize:
        # Entry can never fit: evict everything and keep the table empty.
        self.dynamic_entries.clear()
        self._current_size = 0
        return
    if self._maxsize > 0:
        # Newest entries live at the left end of the deque.
        self.dynamic_entries.appendleft((name, value))
        self._current_size += entry_size
        self._shrink()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(self, name, value):
    """Search both header tables for the (*name*, *value*) entry.

    :return: ``None`` when the name is unknown, ``(index, name, None)``
        for a name-only match, or ``(index, name, value)`` for a perfect
        match.
    """
    partial_match = None
    static_hit = CocaineHeaders.STATIC_TABLE_MAPPING.get(name)
    if static_hit:
        exact_index = static_hit[1].get(value)
        if exact_index is not None:
            return exact_index, name, value
        partial_match = (static_hit[0], name, None)
    # Dynamic entries are numbered after the whole static table, 1-based.
    offset = len(CocaineHeaders.STATIC_TABLE)
    for position, (entry_name, entry_value) in enumerate(self.dynamic_entries, start=offset + 1):
        if entry_name == name:
            if entry_value == value:
                return position, entry_name, entry_value
            if partial_match is None:
                partial_match = (position, entry_name, None)
    return partial_match
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _shrink(self):
    """Evict oldest entries until the dynamic table fits within maxsize."""
    size = self._current_size
    # Oldest entries sit at the right end of the deque.
    while size > self._maxsize:
        evicted_name, evicted_value = self.dynamic_entries.pop()
        size -= table_entry_size(evicted_name, evicted_value)
    self._current_size = size
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def safe_print(ustring, errors='replace', **kwargs):
    """Print a unicode string without crashing on un-encodable characters.

    On Python 2 the string is pre-encoded for stdout, handling
    un-encodable characters per *errors*; Python 3 manages encoding
    itself.

    :param ustring: unicode string to print
    :param errors: codec error handler used for the Python 2 encode step
    :param kwargs: forwarded to :func:`print` (e.g. ``end``, ``file``)
    """
    if sys.version_info[0] == 3:
        print(ustring, **kwargs)
    else:
        target_encoding = sys.stdout.encoding or 'utf-8'
        print(ustring.encode(target_encoding, errors=errors), **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edit_permissions(self):
    """Build the permission-editing view for the role selected in CRUD.

    Sends the UI a 'tree-toggle' object (in ``self.output['objects']``)
    whose tree nodes each carry: ``id`` (dotted permission path used in
    later requests), ``name`` (human readable), ``checked`` (whether the
    role holds the permission) and ``children`` (sub-permissions).
    Toggles are posted back through the 'apply_change' action.  Finishes
    by emitting a PermissionForm.
    """
    # Get the role that was selected in the CRUD view
    key = self.current.input['object_id']
    self.current.task_data['role_id'] = key
    role = RoleModel.objects.get(key=key)
    # Get the cached permission tree, or build a new one if there is none cached
    # TODO: Add an extra view in case there was no cache, as in 'please wait calculating permissions'
    permission_tree = self._permission_trees(PermissionModel.objects)
    # Apply the selected role to the permission tree, setting the 'checked'
    # field of the permissions the role has
    role_tree = self._apply_role_tree(permission_tree, role)
    # Apply final formatting, and output the tree to the UI
    self.output['objects'] = [
        {
            'type': 'tree-toggle',
            'action': 'apply_change',
            'trees': self._format_tree_output(role_tree),
        },
    ]
    self.form_out(PermissionForm())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _permission_trees(permissions):
    """Return the serialized permission tree, rebuilding it on cache miss.

    :param permissions: iterable of PermissionModel instances
    :return: serialized permission tree (also stored in the cache)
    """
    cache = PermissionTreeCache()
    cached_tree = cache.get()
    if cached_tree:
        return cached_tree
    # Cache miss: rebuild the tree from all permissions and store it.
    builder = PermissionTreeBuilder()
    for permission in permissions:
        builder.insert(permission)
    serialized = builder.serialize()
    cache.set(serialized)
    return serialized
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _traverse_tree(tree, path):
"""Traverses the permission tree, returning the permission at given permission path.""" |
path_steps = (step for step in path.split('.') if step != '')
# Special handling for first step, because the first step isn't under 'objects'
first_step = path_steps.next()
subtree = tree[first_step]
for step in path_steps:
subtree = subtree['children'][step]
return subtree |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _format_subtree(self, subtree):
"""Recursively format all subtrees.""" |
subtree['children'] = list(subtree['children'].values())
for child in subtree['children']:
self._format_subtree(child)
return subtree |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_change(self):
"""Applies changes to the permissions of the role. To make a change to the permission of the role, a request in the following format should be sent: .. code-block:: python { 'change': { 'id': 'workflow2.lane1.task1', 'checked': false }, } The 'id' field of the change is the id of the tree element that was sent to the UI (see `Permissions.edit_permissions`). 'checked' field is the new state of the element. """ |
# Permission changes submitted by the UI.
# NOTE(review): the docstring shows 'change' as a single dict, but the code
# iterates it as a sequence of change dicts — presumably the client sends a
# list; confirm against the UI contract.
changes = self.input['change']
key = self.current.task_data['role_id']
role = RoleModel.objects.get(key=key)
for change in changes:
    # 'id' carries the permission code as shown in the permission tree UI.
    permission = PermissionModel.objects.get(code=change['id'])
    if change['checked'] is True:
        role.add_permission(permission)
    else:
        role.remove_permission(permission)
# Persist all grants/revocations with a single save.
role.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, data):
""" write single molecule into file """ |
# Serialize one structure followed by its metadata as an SDF record.
m = self._convert_structure(data)
self._file.write(self._format_mol(*m))
# NOTE(review): the canonical MDL/SDF molfile terminator is 'M  END'
# (two spaces) — verify this literal against what readers expect.
self._file.write('M END\n')
for k, v in data.meta.items():
    # SDF data fields: '> <name>' header line followed by the value.
    self._file.write(f'> <{k}>\n{v}\n')
# '$$$$' terminates the SDF record.
self._file.write('$$$$\n')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_workflow_to_cache(self, serialized_wf_instance):
""" If we aren't come to the end of the wf, saves the wf state and task_data to cache Task_data items that starts with underscore "_" are treated as local and does not passed to subsequent task steps. """ |
# self.current.task_data['flow'] = None
# Work on a copy so pruning keys below doesn't mutate the live task_data.
task_data = self.current.task_data.copy()
for k, v in list(task_data.items()):
    # Keys starting with "_" are step-local and must not leak to later steps.
    if k.startswith('_'):
        del task_data[k]
# The client command is transient and must not be replayed on resume.
if 'cmd' in task_data:
    del task_data['cmd']
self.wf_state.update({'step': serialized_wf_instance,
                      'data': task_data,
                      'name': self.current.workflow_name,
                      'wf_id': self.workflow_spec.wf_id
                      })
if self.current.lane_id:
    # Remember which role currently occupies this lane.
    self.current.pool[self.current.lane_id] = self.current.role.key
self.wf_state['pool'] = self.current.pool
self.current.log.debug("POOL Content before WF Save: %s" % self.current.pool)
self.current.wf_cache.save(self.wf_state)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_pool_context(self):
# TODO: Add in-process caching """ Builds context for the WF pool. Returns: Context dict. """ |
context = {self.current.lane_id: self.current.role, 'self': self.current.role}
for lane_id, role_id in self.current.pool.items():
if role_id:
context[lane_id] = lazy_object_proxy.Proxy(
lambda: self.role_model(super_context).objects.get(role_id))
return context |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_workflow_from_cache(self):
""" loads the serialized wf state and data from cache updates the self.current.task_data """ |
# Only resume from cache when this isn't a brand-new WF token.
if not self.current.new_token:
    self.wf_state = self.current.wf_cache.get(self.wf_state)
    self.current.task_data = self.wf_state['data']
    self.current.set_client_cmds()
    # Restore lane->role assignments saved with the state.
    self.current.pool = self.wf_state['pool']
    return self.wf_state['step']
# NOTE(review): implicitly returns None for a new token — the caller
# presumably creates a fresh workflow in that case; confirm.
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_workflow(self):
""" Serializes the current WF. Returns: WF state data. """ |
self.workflow.refresh_waiting_tasks()
return CompactWorkflowSerializer().serialize_workflow(self.workflow,
include_spec=False) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_workflow_path(self):
""" Tries to find the path of the workflow diagram file in `WORKFLOW_PACKAGES_PATHS`. Returns: Path of the workflow spec file (BPMN diagram) """ |
for pth in settings.WORKFLOW_PACKAGES_PATHS:
path = "%s/%s.bpmn" % (pth, self.current.workflow_name)
if os.path.exists(path):
return path
err_msg = "BPMN file cannot found: %s" % self.current.workflow_name
log.error(err_msg)
raise RuntimeError(err_msg) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_worfklow_spec(self):
""" Generates and caches the workflow spec package from BPMN diagrams that read from disk Returns: SpiffWorkflow Spec object. """ |
# TODO: convert from in-process to redis based caching
# Build the spec once per workflow name and memoize it in-process.
if self.current.workflow_name not in self.workflow_spec_cache:
    # path = self.find_workflow_path()
    # spec_package = InMemoryPackager.package_in_memory(self.current.workflow_name, path)
    # spec = BpmnSerializer().deserialize_workflow_spec(spec_package)
    try:
        self.current.wf_object = BPMNWorkflow.objects.get(name=self.current.workflow_name)
    except ObjectDoesNotExist:
        # Unknown workflow: fall back to the 'not_found' WF and record
        # the originally requested name for that WF to display.
        self.current.wf_object = BPMNWorkflow.objects.get(name='not_found')
        self.current.task_data['non-existent-wf'] = self.current.workflow_name
        self.current.workflow_name = 'not_found'
    # Deserialize the BPMN XML body into a SpiffWorkflow spec.
    xml_content = self.current.wf_object.xml.body
    spec = ZopsSerializer().deserialize_workflow_spec(xml_content, self.current.workflow_name)
    spec.wf_id = self.current.wf_object.key
    self.workflow_spec_cache[self.current.workflow_name] = spec
return self.workflow_spec_cache[self.current.workflow_name]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _save_or_delete_workflow(self):
""" Calls the real save method if we pass the beggining of the wf """ |
# Nothing to persist while still on a Start* task.
if not self.current.task_type.startswith('Start'):
    # Reaching an End* task outside any subprocess means the WF is over.
    if self.current.task_name.startswith('End') and not self.are_we_in_subprocess():
        self.wf_state['finished'] = True
        self.wf_state['finish_date'] = datetime.now().strftime(
            settings.DATETIME_DEFAULT_FORMAT)
        # Ephemeral WFs and external sub-WFs keep no task invitations.
        if self.current.workflow_name not in settings.EPHEMERAL_WORKFLOWS and not \
                self.wf_state['in_external']:
            wfi = WFCache(self.current).get_instance()
            TaskInvitation.objects.filter(instance=wfi, role=self.current.role,
                                          wf_name=wfi.wf.name).delete()
        self.current.log.info("Delete WFCache: %s %s" % (self.current.workflow_name,
                                                         self.current.token))
    # Persist the (possibly finished) state in every non-Start case.
    self.save_workflow_to_cache(self.serialize_workflow())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_engine(self, **kwargs):
""" Initializes the workflow with given request, response objects and diagram name. Args: session: input: workflow_name (str):
Name of workflow diagram without ".bpmn" suffix. File must be placed under one of configured :py:attr:`~zengine.settings.WORKFLOW_PACKAGES_PATHS` """ |
self.current = WFCurrent(**kwargs)
# Fresh default state; overwritten below when resuming an existing token.
self.wf_state = {'in_external': False, 'finished': False}
if not self.current.new_token:
    self.wf_state = self.current.wf_cache.get(self.wf_state)
    self.current.workflow_name = self.wf_state['name']
    # if we have a pre-selected object to work with,
    # inserting it as current.input['id'] and task_data['object_id']
    if 'subject' in self.wf_state:
        self.current.input['id'] = self.wf_state['subject']
        self.current.task_data['object_id'] = self.wf_state['subject']
self.check_for_authentication()
self.check_for_permission()
self.workflow = self.load_or_create_workflow()
# if form data exists in input (user submitted)
# put form data in wf task_data
if 'form' in self.current.input:
    form = self.current.input['form']
    if 'form_name' in form:
        self.current.task_data[form['form_name']] = form
# in wf diagram, if property is stated as init = True
# demanded initial values are assigned and put to cache
start_init_values = self.workflow_spec.wf_properties.get('init', 'False') == 'True'
if start_init_values:
    WFInit = get_object_from_path(settings.WF_INITIAL_VALUES)()
    WFInit.assign_wf_initial_values(self.current)
log_msg = ("\n\n::::::::::: ENGINE STARTED :::::::::::\n"
           "\tWF: %s (Possible) TASK:%s\n"
           "\tCMD:%s\n"
           "\tSUBCMD:%s" % (
               self.workflow.name,
               self.workflow.get_tasks(Task.READY),
               self.current.input.get('cmd'), self.current.input.get('subcmd')))
log.debug(log_msg)
# Stash the startup banner so crash reporters can include it.
sys._zops_wf_state_log = log_msg
self.current.workflow = self.workflow
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_wf_state_log(self):
""" Logs the state of workflow and content of task_data. """ |
output = '\n- - - - - -\n'
output += "WORKFLOW: %s ( %s )" % (self.current.workflow_name.upper(),
self.current.workflow.name)
output += "\nTASK: %s ( %s )\n" % (self.current.task_name, self.current.task_type)
output += "DATA:"
for k, v in self.current.task_data.items():
if v:
output += "\n\t%s: %s" % (k, v)
output += "\nCURRENT:"
output += "\n\tACTIVITY: %s" % self.current.activity
output += "\n\tPOOL: %s" % self.current.pool
output += "\n\tIN EXTERNAL: %s" % self.wf_state['in_external']
output += "\n\tLANE: %s" % self.current.lane_name
output += "\n\tTOKEN: %s" % self.current.token
sys._zops_wf_state_log = output
return output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def switch_from_external_to_main_wf(self):
""" Main workflow switcher. This method recreates main workflow from `main wf` dict which was set by external workflow swicther previously. """ |
# Only switch back when an external WF (entered via switch_to_external_wf,
# which set in_external=True) has reached its terminating EndEvent; both
# the task type and the task name must be 'EndEvent'.
if self.wf_state['in_external'] and self.current.task_type == 'EndEvent' and \
        self.current.task_name == 'EndEvent':
    # The main WF snapshot was stashed by switch_to_external_wf.
    main_wf = self.wf_state['main_wf']
    # Restore the main workflow's name as the current one.
    self.current.workflow_name = main_wf['name']
    # Clear task state, then re-check auth/permission against the main WF.
    self._clear_current_task()
    self.check_for_authentication()
    self.check_for_permission()
    # Reload the main WF spec and resume the instance where it left off.
    self.workflow_spec = self.get_worfklow_spec()
    self.workflow = self.deserialize_workflow(main_wf['step'])
    self.current.workflow = self.workflow
    # Back in the main WF, which is still in progress.
    self.wf_state['in_external'] = False
    self.wf_state['finished'] = False
    # Restore the main WF's lane->role pool.
    self.wf_state['pool'] = main_wf['pool']
    self.current.pool = self.wf_state['pool']
    # Continue executing the restored main WF.
    self.run()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def switch_to_external_wf(self):
""" External workflow switcher. This method copies main workflow information into a temporary dict `main_wf` and makes external workflow acting as main workflow. """ |
# An external WF is declared in the main diagram as a ServiceTask whose
# task-spec type is 'external'.
if (self.current.task_type == 'ServiceTask' and
        self.current.task.task_spec.type == 'external'):
    log.debug("Entering to EXTERNAL WF")
    # Snapshot the main WF state so we can restore it later.
    main_wf = self.wf_state.copy()
    # The external WF name comes from task_data['external_wf'] if present,
    # otherwise from the service task's topic in the diagram.
    self.current.workflow_name = self.current.task_data.pop('external_wf', False) or self.\
        current.task.task_spec.topic
    # Clear task state, then re-check auth/permission against the new WF.
    self._clear_current_task()
    self.check_for_authentication()
    self.check_for_permission()
    # Load the external WF's spec and start a fresh instance of it.
    self.workflow_spec = self.get_worfklow_spec()
    self.workflow = self.create_workflow()
    self.current.workflow = self.workflow
    # main_wf: stashed main WF state for the return trip.
    # in_external: an external WF is now in progress.
    # finished: the main WF hasn't finished; we're inside the external one.
    self.wf_state = {'main_wf': main_wf, 'in_external': True, 'finished': False}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _clear_current_task(self):
""" Clear tasks related attributes, checks permissions While switching WF to WF, authentication and permissions are checked for new WF. """ |
# Reset per-task state so the next WF's auth/permission checks don't
# observe stale task information.
for attr in ('task_name', 'task_type', 'task'):
    setattr(self.current, attr, None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
""" Main loop of the workflow engine - Updates ::class:`~WFCurrent` object. - Checks for Permissions. - Activates all READY tasks. - Runs referenced activities (method calls). - Saves WF states. - Stops if current task is a UserTask or EndTask. - Deletes state object if we finish the WF. """ |
# FIXME: raise if first task after line change isn't a UserTask
# FIXME: raise if last task of a workflow is a UserTask
# actually this check should be done at parser
is_lane_changed = False
while self._should_we_run():
    self.check_for_rerun_user_task()
    task = None
    # Drain every READY task in this pass of the engine loop.
    for task in self.workflow.get_tasks(state=Task.READY):
        self.current.old_lane = self.current.lane_name
        self.current._update_task(task)
        # A lane change hands control to another user; stop immediately.
        if self.catch_lane_change():
            return
        self.check_for_permission()
        self.check_for_lane_permission()
        self.log_wf_state()
        self.switch_lang()
        # Invoke the activity referenced by the task, if any.
        self.run_activity()
        self.parse_workflow_messages()
        self.workflow.complete_task_from_id(self.current.task.id)
        # Persist (or finalize) state after each completed task.
        self._save_or_delete_workflow()
        self.switch_to_external_wf()
    # No READY task was seen this pass: nothing left to advance.
    if task is None:
        break
self.switch_from_external_to_main_wf()
self.current.output['token'] = self.current.token
# look for incoming ready task(s)
for task in self.workflow.get_tasks(state=Task.READY):
    self.current._update_task(task)
    self.catch_lane_change()
# Remove the token from output when the WF is actually over.
self.handle_wf_finalization()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def switch_lang(self):
"""Switch to the language of the current user. If the current language is already the specified one, nothing will be done. """ |
# Install the user's language plus locale-specific datetime and number
# formats; the installer is a no-op when the locale is already active.
locale = self.current.locale
installer = translation.InstalledLocale
installer.install_language(locale['locale_language'])
installer.install_locale(locale['locale_datetime'], 'datetime')
installer.install_locale(locale['locale_number'], 'number')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def catch_lane_change(self):
""" trigger a lane_user_change signal if we switched to a new lane and new lane's user is different from current one """ |
if self.current.lane_name:
    # Only act when we actually moved from one named lane to another.
    if self.current.old_lane and self.current.lane_name != self.current.old_lane:
        # if lane_name not found in pool or it's user different from the current(old) user
        if (self.current.lane_id not in self.current.pool or
                self.current.pool[self.current.lane_id] != self.current.user_id):
            self.current.log.info("LANE CHANGE : %s >> %s" % (self.current.old_lane,
                                                              self.current.lane_name))
            # Optionally say goodbye to the outgoing user's client.
            if self.current.lane_auto_sendoff:
                self.current.sendoff_current_user()
            # Stop the engine loop; another user must continue the WF.
            self.current.flow_enabled = False
            # Optionally invite the users who may own the new lane.
            if self.current.lane_auto_invite:
                self.current.invite_other_parties(self._get_possible_lane_owners())
            return True
# NOTE(review): implicitly returns None (falsy) when no handover occurs.
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_workflow_messages(self):
""" Transmits client message that defined in a workflow task's inputOutput extension .. code-block:: xml <bpmn2:extensionElements> <camunda:inputOutput> <camunda:inputParameter name="client_message"> <camunda:map> <camunda:entry key="title">Teşekkürler</camunda:entry> <camunda:entry key="body">İşlem Başarılı</camunda:entry> <camunda:entry key="type">info</camunda:entry> </camunda:map> </camunda:inputParameter> </camunda:inputOutput> </bpmn2:extensionElements> """ |
# A task may carry a 'client_message' map in its inputOutput extension;
# forward it to the client as a message box.
data = self.current.spec.data
if 'client_message' in data:
    message = data['client_message']
    self.current.msg_box(title=message.get('title'),
                         msg=message.get('body'),
                         typ=message.get('type', 'info'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_activity(self):
""" runs the method that referenced from current task """ |
activity = self.current.activity
if activity:
if activity not in self.wf_activities:
self._load_activity(activity)
self.current.log.debug(
"Calling Activity %s from %s" % (activity, self.wf_activities[activity]))
self.wf_activities[self.current.activity](self.current) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _import_object(self, path, look_for_cls_method):
""" Imports the module that contains the referenced method. Args: path: python path of class/function look_for_cls_method (bool):
If True, treat the last part of path as class method. Returns: Tuple. (class object, class name, method to be called) """ |
last_nth = 2 if look_for_cls_method else 1
path = path.split('.')
module_path = '.'.join(path[:-last_nth])
class_name = path[-last_nth]
module = importlib.import_module(module_path)
if look_for_cls_method and path[-last_nth:][0] == path[-last_nth]:
class_method = path[-last_nth:][1]
else:
class_method = None
return getattr(module, class_name), class_name, class_method |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_activity(self, activity):
""" Iterates trough the all enabled `~zengine.settings.ACTIVITY_MODULES_IMPORT_PATHS` to find the given path. """ |
fpths = []           # every path we tried, for the error report
full_path = ''
errors = []          # collected tracebacks from failed imports
paths = settings.ACTIVITY_MODULES_IMPORT_PATHS
number_of_paths = len(paths)
for index_no in range(number_of_paths):
    full_path = "%s.%s" % (paths[index_no], activity)
    # Try twice per path: first as a plain callable (0), then as a
    # Class.method reference (1).
    for look4kls in (0, 1):
        try:
            self.current.log.info("try to load from %s[%s]" % (full_path, look4kls))
            kls, cls_name, cls_method = self._import_object(full_path, look4kls)
            if cls_method:
                self.current.log.info("WILLCall %s(current).%s()" % (kls, cls_method))
                # Wrap so the activity is instantiated per call with `current`.
                self.wf_activities[activity] = lambda crnt: getattr(kls(crnt), cls_method)()
            else:
                self.wf_activities[activity] = kls
            return
        except (ImportError, AttributeError):
            fpths.append(full_path)
            errmsg = "{activity} not found under these paths:\n\n >>> {paths} \n\n" \
                     "Error Messages:\n {errors}"
            errors.append("\n========================================================>\n"
                          "| PATH | %s"
                          "\n========================================================>\n\n"
                          "%s" % (full_path, traceback.format_exc()))
            # NOTE(review): assert used as control flow — this check
            # disappears under `python -O`; consider raising instead.
            assert index_no != number_of_paths - 1, errmsg.format(activity=activity,
                                                                  paths='\n >>> '.join(
                                                                      set(fpths)),
                                                                  errors='\n\n'.join(errors)
                                                                  )
        except:
            # NOTE(review): bare except swallows everything else (even
            # KeyboardInterrupt); it only logs and continues the loop.
            self.current.log.exception("Cannot found the %s" % activity)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_for_lane_permission(self):
""" One or more permissions can be associated with a lane of a workflow. In a similar way, a lane can be restricted with relation to other lanes of the workflow. This method called on lane changes and checks user has required permissions and relations. Raises: HTTPForbidden: if the current user hasn't got the required permissions and proper relations """ |
# TODO: Cache lane_data in app memory
# First gate: a single named permission attached to the lane.
if self.current.lane_permission:
    log.debug("HAS LANE PERM: %s" % self.current.lane_permission)
    perm = self.current.lane_permission
    if not self.current.has_permission(perm):
        raise HTTPError(403, "You don't have required lane permission: %s" % perm)
# Second gate: a relation expression evaluated against the lane pool.
if self.current.lane_relations:
    context = self.get_pool_context()
    log.debug("HAS LANE RELS: %s" % self.current.lane_relations)
    try:
        # NOTE(review): eval() of the relation expression — the expression
        # comes from the BPMN diagram, not from end users, but keep it that
        # way; never feed user-supplied strings into this.
        cond_result = eval(self.current.lane_relations, context)
    except:
        log.exception("CONDITION EVAL ERROR : %s || %s" % (
            self.current.lane_relations, context))
        raise
    if not cond_result:
        log.debug("LANE RELATION ERR: %s %s" % (self.current.lane_relations, context))
        raise HTTPError(403, "You aren't qualified for this lane: %s" %
                        self.current.lane_relations)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_wf_finalization(self):
""" Removes the ``token`` key from ``current.output`` if WF is over. """ |
# Drop the client token once the flow has stopped: either the engine
# disabled the flow (lane handover) or we reached an End* task outside
# any subprocess.
flow_over = (not self.current.flow_enabled or
             (self.current.task_type.startswith('End') and
              not self.are_we_in_subprocess()))
if flow_over and 'token' in self.current.output:
    del self.current.output['token']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_rdkit_molecule(data):
""" RDKit molecule object to MoleculeContainer converter """ |
m = MoleculeContainer()
atoms, mapping = [], []
for a in data.GetAtoms():
atom = {'element': a.GetSymbol(), 'charge': a.GetFormalCharge()}
atoms.append(atom)
mapping.append(a.GetAtomMapNum())
isotope = a.GetIsotope()
if isotope:
atom['isotope'] = isotope
radical = a.GetNumRadicalElectrons()
if radical:
atom['multiplicity'] = radical + 1
conformers = data.GetConformers()
if conformers:
for atom, (x, y, z) in zip(atoms, conformers[0].GetPositions()):
atom['x'] = x
atom['y'] = y
atom['z'] = z
for atom, mapping in zip(atoms, mapping):
a = m.add_atom(atom)
if mapping:
m.atom(a)._parsed_mapping = mapping
for bond in data.GetBonds():
m.add_bond(bond.GetBeginAtomIdx() + 1, bond.GetEndAtomIdx() + 1, _rdkit_bond_map[bond.GetBondType()])
return m |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_rdkit_molecule(data):
""" MoleculeContainer to RDKit molecule object converter """ |
mol = RWMol()
conf = Conformer()
mapping = {}       # container atom id -> RDKit atom index
is_3d = False
for n, a in data.atoms():
    ra = Atom(a.number)
    # Preserve the container's atom id as the RDKit atom-map number.
    ra.SetAtomMapNum(n)
    if a.charge:
        ra.SetFormalCharge(a.charge)
    # Only non-default isotopes are written explicitly.
    if a.isotope != a.common_isotope:
        ra.SetIsotope(a.isotope)
    if a.radical:
        ra.SetNumRadicalElectrons(a.radical)
    mapping[n] = m = mol.AddAtom(ra)
    conf.SetAtomPosition(m, (a.x, a.y, a.z))
    # Any nonzero z coordinate marks the conformer as 3D.
    if a.z:
        is_3d = True
if not is_3d:
    conf.Set3D(False)
for n, m, b in data.bonds():
    mol.AddBond(mapping[n], mapping[m], _bond_map[b.order])
mol.AddConformer(conf)
# Let RDKit compute valences/aromaticity and validate the molecule.
SanitizeMol(mol)
return mol
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __dfs(self, start, weights, depth_limit):
""" modified NX dfs """ |
adj = self._adj
# Neighbors are visited in ascending `weights` order for determinism.
stack = [(start, depth_limit, iter(sorted(adj[start], key=weights)))]
visited = {start}
# parent -> children reached only because the depth limit was hit there
disconnected = defaultdict(list)
# parent -> children actually taken in the DFS tree
edges = defaultdict(list)
while stack:
    parent, depth_now, children = stack[-1]
    try:
        child = next(children)
    except StopIteration:
        # Parent's neighbors exhausted: backtrack.
        stack.pop()
    else:
        if child not in visited:
            edges[parent].append(child)
            visited.add(child)
            if depth_now > 1:
                # Descend, excluding the edge back to the parent.
                front = adj[child].keys() - {parent}
                if front:
                    stack.append((child, depth_now - 1, iter(sorted(front, key=weights))))
            elif child not in disconnected:
                # Depth limit reached: record the cut-off child.
                disconnected[parent].append(child)
return visited, edges, disconnected
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_args_parser():
"""Return a parser for command line options.""" |
parser = argparse.ArgumentParser(
    description='Marabunta: Migrating ants for Odoo')
# Required options may come from the CLI or from MARABUNTA_* env vars
# (EnvDefault falls back to the environment variable).
parser.add_argument('--migration-file', '-f',
                    action=EnvDefault,
                    envvar='MARABUNTA_MIGRATION_FILE',
                    required=True,
                    help='The yaml file containing the migration steps')
parser.add_argument('--database', '-d',
                    action=EnvDefault,
                    envvar='MARABUNTA_DATABASE',
                    required=True,
                    help="Odoo's database")
parser.add_argument('--db-user', '-u',
                    action=EnvDefault,
                    envvar='MARABUNTA_DB_USER',
                    required=True,
                    help="Odoo's database user")
parser.add_argument('--db-password', '-w',
                    action=EnvDefault,
                    envvar='MARABUNTA_DB_PASSWORD',
                    required=True,
                    help="Odoo's database password")
# Optional connection settings with plain environment fallbacks.
parser.add_argument('--db-port', '-p',
                    default=os.environ.get('MARABUNTA_DB_PORT', 5432),
                    help="Odoo's database port")
parser.add_argument('--db-host', '-H',
                    default=os.environ.get('MARABUNTA_DB_HOST',
                                           'localhost'),
                    help="Odoo's database host")
parser.add_argument('--mode',
                    action=EnvDefault,
                    envvar='MARABUNTA_MODE',
                    required=False,
                    help="Specify the mode in which we run the migration,"
                         "such as 'demo' or 'prod'. Additional operations "
                         "of this mode will be executed after the main "
                         "operations and the addons list of this mode "
                         "will be merged with the main addons list.")
parser.add_argument('--allow-serie',
                    action=BoolEnvDefault,
                    required=False,
                    envvar='MARABUNTA_ALLOW_SERIE',
                    help='Allow to run more than 1 version upgrade at a '
                         'time.')
parser.add_argument('--force-version',
                    required=False,
                    default=os.environ.get('MARABUNTA_FORCE_VERSION'),
                    help='Force upgrade of a version, even if it has '
                         'already been applied.')
# Options for the maintenance-page web server shown during migration.
group = parser.add_argument_group(
    title='Web',
    description='Configuration related to the internal web server, '
                'used to publish a maintenance page during the migration.',
)
group.add_argument('--web-host',
                   required=False,
                   default=os.environ.get('MARABUNTA_WEB_HOST', '0.0.0.0'),
                   help='Host for the web server')
group.add_argument('--web-port',
                   required=False,
                   default=os.environ.get('MARABUNTA_WEB_PORT', 8069),
                   help='Port for the web server')
group.add_argument('--web-custom-html',
                   required=False,
                   default=os.environ.get(
                       'MARABUNTA_WEB_CUSTOM_HTML'
                   ),
                   help='Path to a custom html file to publish')
return parser
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_parse_args(cls, args):
"""Constructor from command line args. :param args: parse command line arguments :type args: argparse.ArgumentParser """ |
return cls(args.migration_file,
args.database,
db_user=args.db_user,
db_password=args.db_password,
db_port=args.db_port,
db_host=args.db_host,
mode=args.mode,
allow_serie=args.allow_serie,
force_version=args.force_version,
web_host=args.web_host,
web_port=args.web_port,
web_custom_html=args.web_custom_html,
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_current(self, current):
""" Creates some aliases for attributes of ``current``. Args: current: :attr:`~zengine.engine.WFCurrent` object. """ |
# Shortcut aliases into the ``current`` object.
self.current = current
self.input = current.input
self.output = current.output
# A client command may chain a follow-up command after the splitter;
# when present, split it into (cmd, next_cmd).
self.cmd = current.task_data['cmd']
self.next_cmd = None
if self.cmd and NEXT_CMD_SPLITTER in self.cmd:
    self.cmd, self.next_cmd = self.cmd.split(NEXT_CMD_SPLITTER)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def form_out(self, _form=None):
""" Renders form. Applies form modifiers, then writes result to response payload. If supplied, given form object instance will be used instead of view's default ObjectForm. Args: _form (:py:attr:`~zengine.forms.json_form.JsonForm`):
Form object to override `self.object_form` """ |
# Use the supplied form instance, falling back to the view's default.
_form = _form or self.object_form
self.output['forms'] = _form.serialize()
# Attach the form's meta properties to the serialized payload.
self._add_meta_props(_form)
self.output['forms']['grouping'] = _form.Meta.grouping
self.output['forms']['constraints'] = _form.Meta.constraints
# Apply form modifiers in place on the serialized output.
self._patch_form(self.output['forms'])
# Tell the client to render a form.
self.set_client_cmd('form')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
""" Creates new permissions. """ |
from pyoko.lib.utils import get_object_from_path
from zengine.config import settings
model = get_object_from_path(settings.PERMISSION_MODEL)
perm_provider = get_object_from_path(settings.PERMISSION_PROVIDER)
existing_perms = []
new_perms = []
for code, name, desc in perm_provider():
    code = six.text_type(code)
    if self.manager.args.dry:
        # Dry run: only check existence, never write.
        exists = model.objects.filter(code=code, name=name)
        if exists:
            perm = exists[0]
            new = False
        else:
            new = True
            perm = model(code=code, name=name)
    else:
        try:
            # NOTE(review): positional get(code) — presumably a key lookup
            # (perm.key is set to code below); confirm against pyoko's API.
            perm = model.objects.get(code)
            existing_perms.append(perm)
        except ObjectDoesNotExist:
            perm = model(description=desc, code=code, name=name)
            perm.key = code
            perm.save()
            new_perms.append(perm)
            # perm, new = model.objects.get_or_create({'description': desc}, code=code, name=name)
            # if new:
            #     new_perms.append(perm)
            # else:
            #     existing_perms.append(perm)
report = "\n\n%s permission(s) were found in DB. " % len(existing_perms)
if new_perms:
    report += "\n%s new permission record added. " % len(new_perms)
else:
    report += 'No new perms added. '
if new_perms:
    if not self.manager.args.dry:
        # New perms invalidate the cached select-box entries for the model.
        SelectBoxCache.flush(model.__name__)
    report += 'Total %s perms exists.' % (len(existing_perms) + len(new_perms))
    report = "\n + " + "\n + ".join([p.name or p.code for p in new_perms]) + report
if self.manager.args.dry:
    print("\n~~~~~~~~~~~~~~ DRY RUN ~~~~~~~~~~~~~~\n")
print(report + "\n")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
""" Creates user, encrypts password. """ |
# Create the user with a hashed password and report the generated key.
from zengine.models import User
args = self.manager.args
new_user = User(username=args.username, superuser=args.super)
new_user.set_password(args.password)
new_user.save()
print("New user created with ID: %s" % new_user.key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _prepare_domain(mapping):
"""Prepare a helper dictionary for the domain to temporarily hold some information.""" |
# Parse the domain-directory mapping
try:
domain, dir = mapping.split(':')
except ValueError:
print("Please provide the sources in the form of '<domain>:<directory>'")
sys.exit(1)
try:
default_language = settings.TRANSLATION_DOMAINS[domain]
except KeyError:
print("Unknown domain {domain}, check the settings file to make sure"
" this domain is set in TRANSLATION_DOMAINS".format(domain=domain))
sys.exit(1)
# Create a temporary file to hold the `.pot` file for this domain
handle, path = tempfile.mkstemp(prefix='zengine_i18n_', suffix='.pot')
return (domain, {
'default': default_language,
'pot': path,
'source': dir,
}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_domains(domains):
"""Check that all domains specified in the settings was provided in the options.""" |
# Every domain configured in settings must have had a source provided.
configured = set(settings.TRANSLATION_DOMAINS.keys())
provided = set(domains.keys())
missing = configured - provided
if missing:
    print('The following domains have been set in the configuration, '
          'but their sources were not provided, use the `--source` '
          'option to specify their sources: {domains}'.format(domains=', '.join(missing)))
    sys.exit(1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _extract_translations(self, domains):
    """Extract the translatable strings of each domain into its `.pot` file.

    Runs babel's `extract_messages` once per domain, writing to the
    temporary `.pot` path prepared earlier, and stamps the output with the
    project metadata taken from the command-line arguments.
    """
    for domain, options in domains.items():
        # Create the extractor
        extractor = babel_frontend.extract_messages()
        extractor.initialize_options()
        # The temporary location to write the `.pot` file
        extractor.output_file = options['pot']
        # Add the comments marked with 'tn:' to the translation file for translators to read. Strip the marker.
        extractor.add_comments = ['tn:']
        extractor.strip_comments = True
        # The directory where the sources for this domain are located
        extractor.input_paths = [options['source']]
        # Pass the metadata to the translator
        extractor.msgid_bugs_address = self.manager.args.contact
        extractor.copyright_holder = self.manager.args.copyright
        extractor.version = self.manager.args.version
        extractor.project = self.manager.args.project
        extractor.finalize_options()
        # Add keywords for lazy translation functions, based on their non-lazy variants
        extractor.keywords.update({
            'gettext_lazy': extractor.keywords['gettext'],
            'ngettext_lazy': extractor.keywords['ngettext'],
            '__': extractor.keywords['gettext'],  # double underscore for lazy
        })
        # Do the extraction
        _run_babel_command(extractor)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_update_po_files(self, domains):
    """Create missing `.po` translation files and refresh existing ones."""
    for language in settings.TRANSLATIONS:
        for domain, options in domains.items():
            # The domain's default language needs no translation file.
            if language == options['default']:
                continue
            pot_path = options['pot']
            if os.path.isfile(_po_path(language, domain)):
                # Existing translation: merge new strings, keep prior work.
                self._update_po_file(language, domain, pot_path)
            else:
                # No translation yet: bootstrap a fresh file.
                self._init_po_file(language, domain, pot_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cleanup(self, domains):
    """Delete the temporary `.pot` files created for the domains."""
    for option in domains.values():
        try:
            os.remove(option['pot'])
        except (IOError, OSError):
            # Leftover temp files are harmless; ignore removal failures.
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Load BPMN workflow diagrams, or clear stored ones with --clear.

    Reads diagrams either from an explicit path or from the configured
    workflow packages, loads them concurrently, then refreshes the cached
    workflow-spec name list.
    """
    from zengine.lib.cache import WFSpecNames

    args = self.manager.args
    if args.clear:
        self._clear_models()
        return
    paths = (self.get_wf_from_path(args.wf_path)
             if args.wf_path else self.get_workflows())
    self.count = 0
    self.do_with_submit(self.load_diagram, paths, threads=args.threads)
    WFSpecNames().refresh()
    print("%s BPMN file loaded" % self.count)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_workflows(self):
    """Scan WORKFLOW_PACKAGES_PATHS for BPMN diagrams.

    Yields:
        Tuple of (diagram name without extension, XML file content).
    """
    for package_dir in settings.WORKFLOW_PACKAGES_PATHS:
        for bpmn_path in glob.glob("%s/*.bpmn" % package_dir):
            diagram_name = os.path.basename(os.path.splitext(bpmn_path)[0])
            with open(bpmn_path) as handle:
                yield diagram_name, handle.read()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_migration_and_solr(self):
    """Check models for pending schema migrations and Solr reachability.

    Runs the schema updater in check-only mode; nothing is modified.
    Prints an error hint if the backing services cannot be reached.
    """
    from pyoko.db.schema_update import SchemaUpdater
    from socket import error as socket_error
    from pyoko.conf import settings
    from importlib import import_module
    import_module(settings.MODELS_MODULE)
    registry = import_module('pyoko.model').model_registry
    # list() instead of a pass-through comprehension copy.
    models = list(registry.get_base_models())
    try:
        print(__(u"Checking migration and solr ..."))
        updater = SchemaUpdater(models, 1, False)
        updater.run(check_only=True)
    except socket_error:
        # Backing services unreachable; the exception itself adds nothing.
        print(__(u"{0}Error not connected, open redis and rabbitmq{1}").format(CheckList.FAIL,
                                                                              CheckList.ENDC))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_redis():
    """Ping Redis and report on stdout whether a connection could be made."""
    from pyoko.db.connection import cache
    from redis.exceptions import ConnectionError
    try:
        cache.ping()
        # Previously the "{0}...{1}" placeholders were concatenated, not
        # formatted, so the braces were printed literally; format them like
        # every other check in this module.
        print(__(u"{0}Redis is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
    except ConnectionError as e:
        # `e.message` only exists on Python 2; printing `e` works on both.
        print(__(u"{0}Redis is not working{1} ").format(CheckList.FAIL,
                                                        CheckList.ENDC), e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_riak():
    """Ping Riak and report on stdout whether a connection could be made."""
    from pyoko.db.connection import client
    from socket import error as socket_error
    try:
        if client.ping():
            print(__(u"{0}Riak is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
        else:
            print(__(u"{0}Riak is not working{1}").format(CheckList.FAIL, CheckList.ENDC))
    except socket_error as e:
        # `e.message` was removed in Python 3; printing `e` works on both.
        print(__(u"{0}Riak is not working{1}").format(CheckList.FAIL,
                                                      CheckList.ENDC), e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_mq_connection(self):
    """Open a blocking RabbitMQ connection and report whether it works.

    Distinguishes a closed connection from bad credentials in the output.
    """
    import pika
    from zengine.client_queue import BLOCKING_MQ_PARAMS
    from pika.exceptions import ProbableAuthenticationError, ConnectionClosed
    try:
        connection = pika.BlockingConnection(BLOCKING_MQ_PARAMS)
        channel = connection.channel()
        if channel.is_open:
            print(__(u"{0}RabbitMQ is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
        elif channel.is_closed or channel.is_closing:
            # Was `self.channel`, but the channel is the local created above;
            # the attribute lookup would have raised AttributeError.
            print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC))
    except ConnectionClosed as e:
        print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC), e)
    except ProbableAuthenticationError:
        print(__(u"{0}RabbitMQ username and password wrong{1}").format(CheckList.FAIL,
                                                                       CheckList.ENDC))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_encoding_and_env():
    """Print the filesystem-encoding status and the relevant environment
    variables so the user can verify the runtime configuration."""
    import sys
    import os
    if sys.getfilesystemencoding() in ['utf-8', 'UTF-8']:
        print(__(u"{0}File system encoding correct{1}").format(CheckList.OKGREEN,
                                                               CheckList.ENDC))
    else:
        print(__(u"{0}File system encoding wrong!!{1}").format(CheckList.FAIL,
                                                               CheckList.ENDC))
    # Only these variables are of interest; everything else is skipped.
    check_env_list = ['RIAK_PROTOCOL', 'RIAK_SERVER', 'RIAK_PORT', 'REDIS_SERVER',
                      'DEFAULT_BUCKET_TYPE', 'PYOKO_SETTINGS',
                      'MQ_HOST', 'MQ_PORT', 'MQ_USER', 'MQ_VHOST',
                      ]
    for name, value in os.environ.items():
        if name in check_env_list:
            print(__(u"{0}{1} : {2}{3}").format(CheckList.BOLD, name, value, CheckList.ENDC))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def no_moves(position):
    """Tell whether the game is over because a side has no legal moves.

    :type: position: Board
    :rtype: bool
    """
    # Short-circuits: black is only checked when white still has moves.
    white_stuck = position.no_moves(color.white)
    return white_stuck or position.no_moves(color.black)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_checkmate(position, input_color):
    """Tell whether the given side's King is checkmated: no legal moves
    remain while the King is in check.

    :type: position: Board
    :type: input_color: Color
    :rtype: bool
    """
    stuck = position.no_moves(input_color)
    return stuck and position.get_king(input_color).in_check(position)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _paginate(self, current_page, query_set, per_page=10):
    """Handle pagination of object listings.

    Args:
        current_page (int): Current page number (1-based).
        query_set: Object listing queryset
            (:class:`QuerySet<pyoko:pyoko.db.queryset.QuerySet>`).
        per_page (int): Objects per page.

    Returns:
        Tuple of (queryset limited to the requested page, pagination dict).
    """
    total_objects = query_set.count()
    # Was `int(total_objects / per_page or 1)`: the `or` applied to the
    # *float* quotient, so any total < per_page gave int(0.x) == 0 pages.
    # Truncate first, then clamp to a minimum of one page.
    total_pages = int(total_objects / per_page) or 1
    # add orphans to last page
    current_per_page = per_page + (
        total_objects % per_page if current_page == total_pages else 0)
    pagination_data = dict(page=current_page,
                           total_pages=total_pages,
                           total_objects=total_objects,
                           per_page=current_per_page)
    query_set = query_set.set_params(rows=current_per_page, start=(current_page - 1) * per_page)
    return query_set, pagination_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_message(current):
    """Create a message (and its attachments) in the given channel.

    Expects ``current.input['message']`` with keys: channel, body, type,
    title, receiver and an optional 'attachments' list of dicts with
    name/content/description. Responds with the new message key,
    status 'Created' and code 201.
    """
    msg = current.input['message']
    msg_obj = Channel.add_message(msg['channel'], body=msg['body'], typ=msg['type'],
                                  sender=current.user,
                                  title=msg['title'], receiver=msg['receiver'] or None)
    current.output = {
        'msg_key': msg_obj.key,
        'status': 'Created',
        'code': 201
    }
    # The guard previously checked for 'attachment' (singular), which never
    # matched the 'attachments' list that is documented and iterated below,
    # so attachments were silently dropped.
    if 'attachments' in msg:
        for atch in msg['attachments']:
            typ = current._dedect_file_type(atch['name'], atch['content'])
            Attachment(channel_id=msg['channel'], msg=msg_obj, name=atch['name'],
                       file=atch['content'], description=atch['description'], typ=typ).save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def show_channel(current, waited=False):
    """Initial display of channel content.

    Returns the channel's description, member list, member count and its
    last messages for the channel keyed by ``current.input['key']``.
    """
    ch = Channel(current).objects.get(current.input['key'])
    sbs = ch.get_subscription_for_user(current.user_id)
    member_list = [{'name': member.user.full_name,
                    'is_online': member.user.is_online(),
                    'avatar_url': member.user.get_avatar_url()}
                   for member in ch.subscriber_set.objects.all()]
    # get_last_messages() yields newest first; prepending restores
    # chronological (oldest-first) order for display.
    last_messages = []
    for msg in ch.get_last_messages():
        last_messages.insert(0, msg.serialize(current.user))
    current.output = {'key': current.input['key'],
                      'description': ch.description,
                      'name': sbs.name,
                      'actions': sbs.get_actions(),
                      'avatar_url': ch.get_avatar(current.user),
                      'no_of_members': len(ch.subscriber_set),
                      'member_list': member_list,
                      'last_messages': last_messages,
                      'status': 'OK',
                      'code': 200
                      }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def channel_history(current):
    """Return up to 20 older messages of a channel.

    Expects ``current.input['channel_key']`` and ``current.input['timestamp']``
    (timestamp of the oldest message already shown on the client).
    """
    current.output = {
        'status': 'OK',
        'code': 201,
        'messages': []
    }
    # Query returns newest-first; insert(0, ...) makes the output oldest-first.
    for msg in list(Message.objects.filter(channel_id=current.input['channel_key'],
                                           updated_at__lte=current.input['timestamp'])[:20]):
        current.output['messages'].insert(0, msg.serialize(current.user))
    # FIXME: looks like pyoko's __lt is broken
    # TODO: convert lte to lt and remove this block, when __lt filter fixed
    # `lte` re-includes the boundary message the client already has; drop it.
    if current.output['messages']:
        current.output['messages'].pop(-1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def report_last_seen_message(current):
    """Record the timestamp of the latest message the user saw in the
    active channel (called on channel open and periodically thereafter)."""
    subscription = Subscriber(current).objects.filter(
        channel_id=current.input['channel_key'],
        user_id=current.user_id)[0]
    subscription.last_seen_msg_time = current.input['timestamp']
    subscription.save()
    current.output = {
        'status': 'OK',
        'code': 200}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_channels(current):
    """List the channel subscriptions of the current user.

    Broken subscriptions (whose channel no longer exists) are logged and
    removed instead of being listed.
    """
    channels = []
    for sbs in current.user.subscriptions.objects.filter(is_visible=True):
        try:
            channels.append(sbs.get_channel_listing())
        except ObjectDoesNotExist:
            # FIXME: This should not happen,
            log.exception("UNPAIRED DIRECT EXCHANGES!!!!")
            sbs.delete()
    current.output = {
        'status': 'OK',
        'code': 200,
        'channels': channels}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unread_count(current):
    """Report the unread notification and message counts for the user.

    The user's private exchange counts as notifications; every other
    subscription counts as messages. Broken subscriptions are removed.
    """
    notifications = 0
    messages = 0
    for sbs in current.user.subscriptions.objects.filter(is_visible=True):
        try:
            if sbs.channel.key == current.user.prv_exchange:
                notifications += sbs.unread_count()
            else:
                messages += sbs.unread_count()
        except ObjectDoesNotExist:
            # FIXME: This should not happen,
            log.exception("MULTIPLE PRV EXCHANGES!!!!")
            sbs.delete()
    current.output = {
        'status': 'OK',
        'code': 200,
        'notifications': notifications,
        'messages': messages
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_notifications(current):
    """Return the last N notifications of the current user.

    ``current.input['amount']`` is optional and defaults to 8. Duplicate
    private-exchange subscriptions are repaired on the fly.
    """
    current.output = {
        'status': 'OK',
        'code': 200,
        'notifications': [],
    }
    amount = current.input.get('amount', 8)
    try:
        notif_sbs = current.user.subscriptions.objects.get(channel_id=current.user.prv_exchange)
    except MultipleObjectsReturned:
        # FIXME: This should not happen,
        log.exception("MULTIPLE PRV EXCHANGES!!!!")
        duplicates = current.user.subscriptions.objects.filter(
            channel_id=current.user.prv_exchange)
        duplicates[0].delete()
        notif_sbs = duplicates[1]
    # Newest-first source; prepend so the client receives oldest-first.
    for msg in notif_sbs.channel.message_set.objects.all()[:amount]:
        current.output['notifications'].insert(0, {
            'title': msg.msg_title,
            'body': msg.body,
            'type': msg.typ,
            'url': msg.url,
            'channel_key': msg.channel.key,
            'message_key': msg.key,
            'timestamp': msg.updated_at})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_channel(current):
    """Create a public channel owned by the current user.

    The owner is subscribed as a manager who cannot leave. Chat-room vs
    broadcast distinction is made later, at subscription time.
    """
    channel = Channel(name=current.input['name'],
                      description=current.input['description'],
                      owner=current.user,
                      typ=15).save()
    with BlockSave(Subscriber):
        Subscriber.objects.get_or_create(user=channel.owner,
                                         channel=channel,
                                         can_manage=True,
                                         can_leave=False)
    # Reuse the channel-detail view to build the response payload.
    current.input['key'] = channel.key
    show_channel(current)
    current.output.update({
        'status': 'Created',
        'code': 201
    })
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_unit_to_channel(current):
    """Subscribe every user of the given unit to the given channel.

    Responds with the keys of already-subscribed and newly-subscribed
    members, split into 'existing' and 'newly_added'.
    """
    read_only = current.input['read_only']
    channel_key = current.input['channel_key']
    newly_added = []
    existing = []
    for member_key in UnitModel.get_user_keys(current, current.input['unit_key']):
        sb, created = Subscriber(current).objects.get_or_create(
            user_id=member_key,
            read_only=read_only,
            channel_id=channel_key)
        (newly_added if created else existing).append(member_key)
    current.output = {
        'existing': existing,
        'newly_added': newly_added,
        'status': 'OK',
        'code': 201
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_user(current):
    """Search users for adding to a public room or starting a direct chat.

    Responds with (full_name, key, avatar_url) tuples; the current user is
    never included in the results.
    """
    current.output = {
        'results': [],
        'status': 'OK',
        'code': 201
    }
    query_set = UserModel(current).objects.exclude(key=current.user_id).search_on(
        *settings.MESSAGING_USER_SEARCH_FIELDS,
        contains=current.input['query'])
    # FIXME: somehow exclude(key=current.user_id) not working with search_on()
    for user in query_set:
        if user.key == current.user_id:
            continue
        current.output['results'].append((user.full_name, user.key, user.get_avatar_url()))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_unit(current):
    """Search units, for bulk-subscribing their users to a channel.

    Responds with (name, key) tuples.
    """
    results = []
    # Renamed the loop variable: it iterates units, not users.
    for unit in UnitModel(current).objects.search_on(*settings.MESSAGING_UNIT_SEARCH_FIELDS,
                                                     contains=current.input['query']):
        results.append((unit.name, unit.key))
    current.output = {
        'results': results,
        'status': 'OK',
        'code': 201
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_direct_channel(current):
    """Create (or fetch) the one-to-one channel between the current user
    and the user keyed by ``current.input['user_key']``."""
    channel, sub_name = Channel.get_or_create_direct_channel(current.user_id,
                                                            current.input['user_key'])
    # Delegate the response body to the channel-detail view.
    current.input['key'] = channel.key
    show_channel(current)
    current.output.update({
        'status': 'Created',
        'code': 201
    })
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_message(current):
    """Search in messages, paginated.

    If 'channel_key' is given, search is limited to that channel; otherwise
    it spans all of the user's subscribed channels. Responds with serialized
    messages plus pagination data.
    """
    current.output = {
        'results': [],
        'status': 'OK',
        'code': 201
    }
    # search_on() takes field names unpacked (cf. search_user/search_unit);
    # the list that was passed here became a single bogus field argument.
    query_set = Message(current).objects.search_on('msg_title', 'body', 'url',
                                                   contains=current.input['query'])
    if current.input['channel_key']:
        query_set = query_set.filter(channel_id=current.input['channel_key'])
    else:
        subscribed_channels = Subscriber.objects.filter(user_id=current.user_id).values_list(
            "channel_id", flatten=True)
        query_set = query_set.filter(channel_id__in=subscribed_channels)
    query_set, pagination_data = _paginate(current_page=current.input['page'], query_set=query_set)
    current.output['pagination'] = pagination_data
    for msg in query_set:
        current.output['results'].append(msg.serialize(current.user))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_channel(current):
    """Delete a channel (owner only) with all of its subscriptions and
    messages."""
    ch_key = current.input['channel_key']
    # get() scoped by owner_id: only the owner may delete the channel.
    channel = Channel(current).objects.get(owner_id=current.user_id, key=ch_key)
    channel.delete()
    # Cascade the removal to everything referencing the channel.
    Subscriber.objects.filter(channel_id=ch_key).delete()
    Message.objects.filter(channel_id=ch_key).delete()
    current.output = {'status': 'Deleted', 'code': 200}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edit_channel(current):
    """Update a channel's name and description (owner only) and propagate
    the new name to all subscriptions."""
    channel = Channel(current).objects.get(owner_id=current.user_id,
                                           key=current.input['channel_key'])
    channel.name = current.input['name']
    channel.description = current.input['description']
    channel.save()
    # Subscriptions keep a denormalized copy of the channel name; sync it.
    for subscription in channel.subscriber_set.objects.all():
        subscription.name = channel.name
        subscription.save()
    current.output = {'status': 'OK', 'code': 200}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pin_channel(current):
    """Pin a channel to the top of the current user's channel list."""
    try:
        Subscriber(current).objects.filter(
            user_id=current.user_id,
            channel_id=current.input['channel_key']).update(pinned=True)
        current.output = {'status': 'OK', 'code': 200}
    except ObjectDoesNotExist:
        raise HTTPError(404, "")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_message(current):
    """Delete a message owned by the current user; 404 if not found."""
    try:
        # get() scoped by sender_id: users may only delete their own messages.
        message = Message(current).objects.get(sender_id=current.user_id,
                                               key=current.input['key'])
        message.delete()
        current.output = {'status': 'Deleted', 'code': 200, 'key': current.input['key']}
    except ObjectDoesNotExist:
        raise HTTPError(404, "")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edit_message(current):
    """Edit the body of a message owned by the current user; 404 if not
    found."""
    current.output = {'status': 'OK', 'code': 200}
    incoming = current.input['message']
    try:
        # get() scoped by sender_id: users may only edit their own messages.
        message = Message(current).objects.get(sender_id=current.user_id,
                                               key=incoming['key'])
        message.body = incoming['body']
        message.save()
    except ObjectDoesNotExist:
        raise HTTPError(404, "")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flag_message(current):
    """Flag an inappropriate message.

    Expects ``current.input['key']`` (the message key). Idempotent: a user
    can flag a given message at most once. Responds with 'Created' / 201.
    """
    current.output = {'status': 'Created', 'code': 201}
    # get_or_create keeps repeated flaggings from creating duplicates.
    FlaggedMessage.objects.get_or_create(user_id=current.user_id,
                                         message_id=current.input['key'])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.