sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def set_terminal_key(self, encrypted_key):
    """
    Change the terminal key (TPK).

    :param encrypted_key: Hex string of the new terminal key, expected to
        be encrypted under the terminal master key (TMK).
    :return: True if the key was changed; False on empty input, invalid
        hex, or key-length mismatch.
    """
    if encrypted_key:
        try:
            new_key = bytes.fromhex(encrypted_key)
            if len(self.terminal_key) != len(new_key):
                # The keys must have equal length
                return False
            # Decrypt under the master-key cipher, persist the clear key,
            # then rebuild the working TPK cipher from it.
            self.terminal_key = self.tmk_cipher.decrypt(new_key)
            self.store_terminal_key(raw2str(self.terminal_key))
            self.tpk_cipher = DES3.new(self.terminal_key, DES3.MODE_ECB)
            self.print_keys()
            return True
        except ValueError:
            # bytes.fromhex() raises ValueError on a non-hex string;
            # DES3.new may also raise it for an invalid key — TODO confirm.
            return False
    return False
|
Change the terminal key. The encrypted_key is a hex string.
encrypted_key is expected to be encrypted under master key
|
entailment
|
def get_encrypted_pin(self, clear_pin, card_number):
    """
    Get PIN block in ISO 0 format, encrypted with the terminal key.

    :param clear_pin: Clear PIN string.
    :param card_number: Card number (PAN) used to build the PIN block.
    :return: Hex string of the encrypted PIN block, or '' on any error.
    """
    if not self.terminal_key:
        print('Terminal key is not set')
        return ''
    # NOTE(review): format code '01' is treated as ISO 0 per the docstring —
    # confirm against the terminal's PIN block format table.
    if self.pinblock_format == '01':
        try:
            pinblock = bytes.fromhex(get_pinblock(clear_pin, card_number))
            #print('PIN block: {}'.format(raw2str(pinblock)))
        except TypeError:
            # bytes.fromhex() raises TypeError when get_pinblock()
            # returns a non-string (presumably None on invalid input).
            return ''
        encrypted_pinblock = self.tpk_cipher.encrypt(pinblock)
        return raw2str(encrypted_pinblock)
    else:
        print('Unsupported PIN Block format')
        return ''
|
Get PIN block in ISO 0 format, encrypted with the terminal key
|
entailment
|
def is_url(string, allowed_schemes=None):
    """
    Check whether a string is a valid url.

    :param string: String to check.
    :param allowed_schemes: Optional list of accepted schemes
        ('http', 'https', 'ftp'...); None accepts any scheme.
    :return: True if the string is a url, False otherwise.
    :rtype: bool
    """
    if not is_full_string(string):
        return False
    matches_url = URL_RE.search(string) is not None
    if not allowed_schemes:
        return matches_url
    has_allowed_scheme = any(string.startswith(scheme) for scheme in allowed_schemes)
    return matches_url and has_allowed_scheme
|
Check if a string is a valid url.
:param string: String to check.
:param allowed_schemes: List of valid schemes ('http', 'https', 'ftp'...). Default to None (any scheme is valid).
:return: True if url, false otherwise
:rtype: bool
|
entailment
|
def is_credit_card(string, card_type=None):
    """
    Check whether a string is a valid credit card number.

    When ``card_type`` is given, only that card type is matched; otherwise
    any known card type is accepted.

    :param string: String to check.
    :type string: str
    :param card_type: One of 'VISA', 'MASTERCARD', 'AMERICAN_EXPRESS',
        'DINERS_CLUB', 'DISCOVER', 'JCB' or None (default: any card).
    :type card_type: str
    :return: True if the string is a credit card number, False otherwise.
    :rtype: bool
    :raises KeyError: If ``card_type`` is not a known card type.
    """
    if not is_full_string(string):
        return False
    if card_type:
        if card_type not in CREDIT_CARDS:
            raise KeyError(
                'Invalid card type "{}". Valid types are: {}'.format(card_type, ', '.join(CREDIT_CARDS.keys()))
            )
        return bool(CREDIT_CARDS[card_type].search(string))
    # No specific type requested: accept a match against any known card.
    return any(regex.search(string) for regex in CREDIT_CARDS.values())
|
Checks if a string is a valid credit card number.
If card type is provided then it checks that specific type,
otherwise any known credit card number will be accepted.
:param string: String to check.
:type string: str
:param card_type: Card type.
:type card_type: str
Can be one of these:
* VISA
* MASTERCARD
* AMERICAN_EXPRESS
* DINERS_CLUB
* DISCOVER
* JCB
or None. Default to None (any card).
:return: True if credit card, false otherwise.
:rtype: bool
|
entailment
|
def is_snake_case(string, separator='_'):
    """
    Checks if a string is formatted as snake case.
    A string is considered snake case when:
    * it's composed only by lowercase letters ([a-z]), underscores (or provided separator) \
    and optionally numbers ([0-9])
    * it does not start/end with an underscore (or provided separator)
    * it does not start with a number
    :param string: String to test.
    :type string: str
    :param separator: String to use as separator.
    :type separator: str
    :return: True for a snake case string, false otherwise.
    :rtype: bool
    """
    if is_full_string(string):
        # Precompiled patterns for the common separators.
        re_map = {
            '_': SNAKE_CASE_TEST_RE,
            '-': SNAKE_CASE_TEST_DASH_RE
        }
        # Raw string literal: '\d' in a normal string is an invalid escape
        # sequence (DeprecationWarning since Python 3.6, error in future
        # versions).
        re_template = r'^[a-z]+([a-z\d]+{sign}|{sign}[a-z\d]+)+[a-z\d]+$'
        r = re_map.get(separator, re.compile(re_template.format(sign=re.escape(separator))))
        return bool(r.search(string))
    return False
|
Checks if a string is formatted as snake case.
A string is considered snake case when:
* it's composed only by lowercase letters ([a-z]), underscores (or provided separator) \
and optionally numbers ([0-9])
* it does not start/end with an underscore (or provided separator)
* it does not start with a number
:param string: String to test.
:type string: str
:param separator: String to use as separator.
:type separator: str
:return: True for a snake case string, false otherwise.
:rtype: bool
|
entailment
|
def is_json(string):
    """
    Check whether a string is a valid json object.

    :param string: String to check.
    :type string: str
    :return: True if the string decodes to a json object, False otherwise.
    :rtype: bool
    """
    if not is_full_string(string):
        return False
    # Cheap structural pre-check before attempting a full decode.
    if JSON_WRAPPER_RE.search(string) is None:
        return False
    try:
        decoded = json.loads(string)
    except (TypeError, ValueError, OverflowError):
        return False
    return isinstance(decoded, dict)
|
Check if a string is a valid json.
:param string: String to check.
:type string: str
:return: True if json, false otherwise
:rtype: bool
|
entailment
|
def is_palindrome(string, strict=True):
    """
    Check whether the string is a palindrome
    (https://en.wikipedia.org/wiki/Palindrome).

    :param string: String to check.
    :type string: str
    :param strict: True if white spaces matter (default), False otherwise.
    :type strict: bool
    :return: True if the string is a palindrome (like "otto", or
        "i topi non avevano nipoti" if strict=False), False otherwise.
    """
    if not is_full_string(string):
        return False
    if not strict:
        # Strip every whitespace character and re-check strictly.
        return is_palindrome(SPACES_RE.sub('', string))
    return reverse(string) == string
|
Checks if the string is a palindrome (https://en.wikipedia.org/wiki/Palindrome).
:param string: String to check.
:type string: str
:param strict: True if white spaces matter (default), false otherwise.
:type strict: bool
:return: True if the string is a palindrome (like "otto", or "i topi non avevano nipoti" if strict=False),
False otherwise
|
entailment
|
def is_pangram(string):
    """
    Check whether the string is a pangram
    (https://en.wikipedia.org/wiki/Pangram).

    :param string: String to check.
    :type string: str
    :return: True if the string is a pangram, False otherwise.
    """
    if not is_full_string(string):
        return False
    present_chars = set(SPACES_RE.sub('', string))
    return present_chars.issuperset(letters_set)
|
Checks if the string is a pangram (https://en.wikipedia.org/wiki/Pangram).
:param string: String to check.
:type string: str
:return: True if the string is a pangram, False otherwise.
|
entailment
|
def is_slug(string, sign='-'):
    """
    Check whether a given string is a slug.

    :param string: String to check.
    :type string: str
    :param sign: Join sign used by the slug.
    :type sign: str
    :return: True if the string is a slug, False otherwise.
    """
    if not is_full_string(string):
        return False
    pattern = r'^([a-z\d]+' + re.escape(sign) + r'?)*[a-z\d]$'
    return bool(re.match(pattern, string))
|
Checks if a given string is a slug.
:param string: String to check.
:type string: str
:param sign: Join sign used by the slug.
:type sign: str
:return: True if slug, false otherwise.
|
entailment
|
def camel_case_to_snake(string, separator='_'):
    """
    Convert a camel case string into a snake case one.
    (The original string is returned unchanged when it is not valid
    camel case.)

    :param string: String to convert.
    :type string: str
    :param separator: Sign to use as separator.
    :type separator: str
    :return: Converted string.
    :rtype: str
    :raises TypeError: If ``string`` is not a string.
    """
    if not is_string(string):
        raise TypeError('Expected string')
    if not is_camel_case(string):
        return string

    def insert_separator(match):
        # Keep the matched prefix and append the separator before the
        # following uppercase letter.
        return match.group(1) + separator

    return CAMEL_CASE_REPLACE_RE.sub(insert_separator, string).lower()
|
Convert a camel case string into a snake case one.
(The original string is returned if is not a valid camel case string)
:param string: String to convert.
:type string: str
:param separator: Sign to use as separator.
:type separator: str
:return: Converted string.
:rtype: str
|
entailment
|
def snake_case_to_camel(string, upper_case_first=True, separator='_'):
    """
    Convert a snake case string into a camel case one.
    (The original string is returned if is not a valid snake case string)
    :param string: String to convert.
    :type string: str
    :param upper_case_first: True to turn the first letter into uppercase (default).
    :type upper_case_first: bool
    :param separator: Sign to use as separator (default to "_").
    :type separator: str
    :return: Converted string
    :rtype: str
    """
    if not is_string(string):
        raise TypeError('Expected string')
    if not is_snake_case(string, separator):
        return string
    # Precompiled patterns for the common separators.
    re_map = {
        '_': SNAKE_CASE_REPLACE_RE,
        '-': SNAKE_CASE_REPLACE_DASH_RE
    }
    # Raw string literal: '\d' in a normal string is an invalid escape
    # sequence (DeprecationWarning since Python 3.6, error in future
    # versions).
    r = re_map.get(separator, re.compile(r'({sign})([a-z\d])'.format(sign=re.escape(separator))))
    # Drop the separator and uppercase the letter that followed it.
    string = r.sub(lambda m: m.group(2).upper(), string)
    if upper_case_first:
        return string[0].upper() + string[1:]
    return string
|
Convert a snake case string into a camel case one.
(The original string is returned if is not a valid snake case string)
:param string: String to convert.
:type string: str
:param upper_case_first: True to turn the first letter into uppercase (default).
:type upper_case_first: bool
:param separator: Sign to use as separator (default to "_").
:type separator: str
:return: Converted string
:rtype: str
|
entailment
|
def shuffle(string):
    """
    Return a new string containing shuffled items.
    :param string: String to shuffle
    :type string: str
    :return: Shuffled string
    :rtype: str
    """
    # list() is sufficient here: the original sorted the characters first,
    # which adds O(n log n) work without changing the result — a uniform
    # random shuffle of any starting order is still a uniform permutation.
    chars = list(string)
    random.shuffle(chars)  # in-place shuffle of the char list
    return ''.join(chars)
|
Return a new string containing shuffled items.
:param string: String to shuffle
:type string: str
:return: Shuffled string
:rtype: str
|
entailment
|
def strip_html(string, keep_tag_content=False):
    """
    Remove html code contained in the given string.

    :param string: String to manipulate.
    :type string: str
    :param keep_tag_content: True to preserve tag content, False to remove
        the tag and its content too (default).
    :type keep_tag_content: bool
    :return: String with html removed.
    :rtype: str
    """
    if keep_tag_content:
        regex = HTML_TAG_ONLY_RE
    else:
        regex = HTML_RE
    return regex.sub('', string)
|
Remove html code contained into the given string.
:param string: String to manipulate.
:type string: str
:param keep_tag_content: True to preserve tag content, False to remove tag and its content too (default).
:type keep_tag_content: bool
:return: String with html removed.
:rtype: str
|
entailment
|
def prettify(string):
    """
    Turns an ugly text string into a beautiful one by applying a regex pipeline which ensures the following:
    - String cannot start or end with spaces
    - String cannot have multiple sequential spaces, empty lines or punctuation (except for "?", "!" and ".")
    - Arithmetic operators (+, -, /, \*, =) must have one, and only one space before and after themselves
    - The first letter after a dot, an exclamation or a question mark must be uppercase
    - One, and only one space should follow a dot, an exclamation or a question mark
    - Text inside double quotes cannot start or end with spaces, but one, and only one space must come first and \
    after quotes (foo" bar"baz -> foo "bar" baz)
    - Text inside round brackets cannot start or end with spaces, but one, and only one space must come first and \
    after brackets ("foo(bar )baz" -> "foo (bar) baz")
    - Percentage sign ("%") cannot be preceded by a space if there is a number before ("100 %" -> "100%")
    - Saxon genitive is correct ("Dave' s dog" -> "Dave's dog")
    :param string: String to manipulate
    :return: Prettified string.
    :rtype: str
    """
    def remove_duplicates(regex_match):
        # Collapse a matched run of duplicates to its first character.
        return regex_match.group(1)[0]
    def uppercase_first_letter_after_sign(regex_match):
        # Uppercase the letter following a sentence-ending sign; keeps the
        # sign and separator intact (match[2] is the letter position).
        match = regex_match.group(1)
        return match[:-1] + match[2].upper()
    def ensure_right_space_only(regex_match):
        # Trim surrounding spaces and re-add exactly one on the right.
        return regex_match.group(1).strip() + ' '
    def ensure_left_space_only(regex_match):
        # Trim surrounding spaces and re-add exactly one on the left.
        return ' ' + regex_match.group(1).strip()
    def ensure_spaces_around(regex_match):
        # Trim surrounding spaces and re-add exactly one on each side.
        return ' ' + regex_match.group(1).strip() + ' '
    def remove_internal_spaces(regex_match):
        # Drop leading/trailing spaces inside quotes/brackets.
        return regex_match.group(1).strip()
    def fix_saxon_genitive(regex_match):
        # "Dave' s" -> "Dave's " (trailing space restored).
        return regex_match.group(1).replace(' ', '') + ' '
    # NOTE: the substitution order below matters — later rules operate on
    # the output of earlier ones.
    p = PRETTIFY_RE['DUPLICATES'].sub(remove_duplicates, string)
    p = PRETTIFY_RE['RIGHT_SPACE'].sub(ensure_right_space_only, p)
    p = PRETTIFY_RE['LEFT_SPACE'].sub(ensure_left_space_only, p)
    p = PRETTIFY_RE['SPACES_AROUND'].sub(ensure_spaces_around, p)
    p = PRETTIFY_RE['SPACES_INSIDE'].sub(remove_internal_spaces, p)
    p = PRETTIFY_RE['UPPERCASE_AFTER_SIGN'].sub(uppercase_first_letter_after_sign, p)
    p = PRETTIFY_RE['SAXON_GENITIVE'].sub(fix_saxon_genitive, p)
    p = p.strip()
    try:
        # Capitalize the very first letter of the result.
        return p[0].capitalize() + p[1:]
    except IndexError:
        # Empty string after stripping.
        return p
|
Turns an ugly text string into a beautiful one by applying a regex pipeline which ensures the following:
- String cannot start or end with spaces
- String cannot have multiple sequential spaces, empty lines or punctuation (except for "?", "!" and ".")
- Arithmetic operators (+, -, /, \*, =) must have one, and only one space before and after themselves
- The first letter after a dot, an exclamation or a question mark must be uppercase
- One, and only one space should follow a dot, an exclamation or a question mark
- Text inside double quotes cannot start or end with spaces, but one, and only one space must come first and \
after quotes (foo" bar"baz -> foo "bar" baz)
- Text inside round brackets cannot start or end with spaces, but one, and only one space must come first and \
after brackets ("foo(bar )baz" -> "foo (bar) baz")
- Percentage sign ("%") cannot be preceded by a space if there is a number before ("100 %" -> "100%")
- Saxon genitive is correct ("Dave' s dog" -> "Dave's dog")
:param string: String to manipulate
:return: Prettified string.
:rtype: str
|
entailment
|
def slugify(string, sign='-'):
    """
    Convert a string into a slug using the provided join sign.
    (**(This Is A "Test"!)** -> **this-is-a-test**)

    :param string: String to convert.
    :type string: str
    :param sign: Sign used to join string tokens (default to "-").
    :type sign: str
    :return: Slugified string.
    :raises TypeError: If ``string`` is not a string.
    """
    if not is_string(string):
        raise TypeError('Expected string')
    # unicode casting for python 2 (unicode is default for python 3)
    try:
        string = unicode(string, 'utf-8')
    except NameError:
        pass
    # Replace anything that is not a letter or a number with spaces.
    spaced = NO_LETTERS_OR_NUMBERS_RE.sub(' ', string.lower()).strip()
    # Join tokens with the sign, then collapse repeated signs.
    joined = SPACES_RE.sub(sign, spaced)
    deduped = re.sub(re.escape(sign) + r'+', sign, joined)
    # Translate non-ascii signs to their ascii equivalents.
    return unicodedata.normalize('NFD', deduped).encode('ascii', 'ignore').decode('utf-8')
|
Converts a string into a slug using provided join sign.
(**(This Is A "Test"!)** -> **this-is-a-test**)
:param string: String to convert.
:type string: str
:param sign: Sign used to join string tokens (default to "-").
:type sign: str
:return: Slugified string
|
entailment
|
def setup(app):
    """Initialize Sphinx extension."""
    # nbsphinx must be loaded first: the linked-notebook parser builds on it.
    app.setup_extension('nbsphinx')
    app.add_source_suffix('.nblink', 'linked_jupyter_notebook')
    app.add_source_parser(LinkedNotebookParser)
    app.add_config_value('nbsphinx_link_target_root', None, rebuild='env')
    metadata = {'version': __version__, 'parallel_read_safe': True}
    return metadata
|
Initialize Sphinx extension.
|
entailment
|
def parse(self, inputstring, document):
    """Parse the nblink file.

    Adds the linked file as a dependency, reads the file, and
    passes the content to the nbsphinx.NotebookParser.
    """
    # The .nblink source is a small JSON document; its 'path' entry points
    # at the real notebook, relative to the .nblink file.
    link = json.loads(inputstring)
    env = document.settings.env
    source_dir = os.path.dirname(env.doc2path(env.docname))
    abs_path = os.path.normpath(os.path.join(source_dir, link['path']))
    path = utils.relative_path(None, abs_path)
    path = nodes.reprunicode(path)
    # Register the notebook as a dependency so editing it rebuilds the doc.
    document.settings.record_dependencies.add(path)
    env.note_dependency(path)
    target_root = env.config.nbsphinx_link_target_root
    target = utils.relative_path(target_root, abs_path)
    target = nodes.reprunicode(target).replace(os.path.sep, '/')
    env.metadata[env.docname]['nbsphinx-link-target'] = target
    # Copy parser from nbsphinx for our custom format
    try:
        formats = env.config.nbsphinx_custom_formats
    except AttributeError:
        pass
    else:
        formats.setdefault(
            '.nblink',
            lambda s: nbformat.reads(s, as_version=_ipynbversion))
    try:
        include_file = io.FileInput(source_path=path, encoding='utf8')
    except UnicodeEncodeError as error:
        raise NotebookError(u'Problems with linked notebook "%s" path:\n'
                            'Cannot encode input file path "%s" '
                            '(wrong locale?).' %
                            (env.docname, SafeString(path)))
    except IOError as error:
        raise NotebookError(u'Problems with linked notebook "%s" path:\n%s.' %
                            (env.docname, ErrorString(error)))
    try:
        rawtext = include_file.read()
    except UnicodeError as error:
        raise NotebookError(u'Problem with linked notebook "%s":\n%s' %
                            (env.docname, ErrorString(error)))
    # Hand the raw notebook text to nbsphinx's NotebookParser.
    return super(LinkedNotebookParser, self).parse(rawtext, document)
|
Parse the nblink file.
Adds the linked file as a dependency, read the file, and
pass the content to the nbsphinx.NotebookParser.
|
entailment
|
def finalize(self):
    """Output the duplicate scripts detected."""
    if self.total_duplicate <= 0:
        return
    print('{} duplicate scripts found'.format(self.total_duplicate))
    for duplicate in self.list_duplicate:
        print(duplicate)
|
Output the duplicate scripts detected.
|
entailment
|
def analyze(self, scratch, **kwargs):
    """Run and return the results from the DuplicateScripts plugin.

    Only takes into account scripts with more than 3 blocks.

    :param scratch: Scratch project to analyze.
    """
    # Tuples of block names seen so far; a repeat indicates a duplicate.
    scripts_set = set()
    for script in self.iter_scripts(scratch):
        if script[0].type.text == 'define %s':
            continue  # Ignore user defined scripts
        blocks_list = []
        for name, _, _ in self.iter_blocks(script.blocks):
            blocks_list.append(name)
        blocks_tuple = tuple(blocks_list)
        if blocks_tuple in scripts_set:
            # Only duplicates longer than 3 blocks are reported.
            if len(blocks_list) > 3:
                self.total_duplicate += 1
                self.list_duplicate.append(blocks_list)
        else:
            scripts_set.add(blocks_tuple)
|
Run and return the results from the DuplicateScripts plugin.
Only takes into account scripts with more than 3 blocks.
|
entailment
|
def _set_content_type(self, system):
""" Set response content type """
request = system.get('request')
if request:
response = request.response
ct = response.content_type
if ct == response.default_content_type:
response.content_type = 'application/json'
|
Set response content type
|
entailment
|
def _render_response(self, value, system):
""" Render a response """
view = system['view']
enc_class = getattr(view, '_json_encoder', None)
if enc_class is None:
enc_class = get_json_encoder()
return json.dumps(value, cls=enc_class)
|
Render a response
|
entailment
|
def _get_common_kwargs(self, system):
""" Get kwargs common for all methods. """
enc_class = getattr(system['view'], '_json_encoder', None)
if enc_class is None:
enc_class = get_json_encoder()
return {
'request': system['request'],
'encoder': enc_class,
}
|
Get kwargs common for all methods.
|
entailment
|
def _get_create_update_kwargs(self, value, common_kw):
""" Get kwargs common to create, update, replace. """
kw = common_kw.copy()
kw['body'] = value
if '_self' in value:
kw['headers'] = [('Location', value['_self'])]
return kw
|
Get kwargs common to create, update, replace.
|
entailment
|
def render_create(self, value, system, common_kw):
    """ Render response for view `create` method (collection POST) """
    return JHTTPCreated(**self._get_create_update_kwargs(value, common_kw))
|
Render response for view `create` method (collection POST)
|
entailment
|
def render_update(self, value, system, common_kw):
    """ Render response for view `update` method (item PATCH) """
    return JHTTPOk('Updated', **self._get_create_update_kwargs(value, common_kw))
|
Render response for view `update` method (item PATCH)
|
entailment
|
def render_delete_many(self, value, system, common_kw):
    """ Render response for view `delete_many` method (collection DELETE)
    """
    if isinstance(value, dict):
        # Value is already response data; pass it through as extra.
        return JHTTPOk(extra=value)
    model_name = system['view'].Model.__name__
    msg = 'Deleted {} {}(s) objects'.format(value, model_name)
    return JHTTPOk(msg, **dict(common_kw))
|
Render response for view `delete_many` method (collection DELETE)
|
entailment
|
def render_update_many(self, value, system, common_kw):
    """ Render response for view `update_many` method
    (collection PUT/PATCH)
    """
    model_name = system['view'].Model.__name__
    msg = 'Updated {} {}(s) objects'.format(value, model_name)
    return JHTTPOk(msg, **dict(common_kw))
|
Render response for view `update_many` method
(collection PUT/PATCH)
|
entailment
|
def _render_response(self, value, system):
    """ Handle response rendering.

    Calls mixin methods according to request.action value; falls back
    to the parent class' renderer when no request/action is available
    or no matching `render_<action>` method exists.
    """
    super_call = super(DefaultResponseRendererMixin, self)._render_response
    try:
        method_name = 'render_{}'.format(system['request'].action)
    except (KeyError, AttributeError):
        # No request in `system`, or the request has no `action`.
        return super_call(value, system)
    method = getattr(self, method_name, None)
    if method is not None:
        common_kw = self._get_common_kwargs(system)
        response = method(value, system, common_kw)
        # The render_* method produced a complete response object; attach
        # it to the request and return nothing.
        system['request'].response = response
        return
    return super_call(value, system)
|
Handle response rendering.
Calls mixin methods according to request.action value.
|
entailment
|
def remember(self, request, username, **kw):
    """ Returns 'WWW-Authenticate' header with a value that should be used
    in 'Authorization' header.
    """
    if not self.credentials_callback:
        return None
    token = self.credentials_callback(username, request)
    header_value = 'ApiKey {}:{}'.format(username, token)
    return [('WWW-Authenticate', header_value)]
|
Returns 'WWW-Authenticate' header with a value that should be used
in 'Authorization' header.
|
entailment
|
def callback(self, username, request):
    """ Having :username: return user's identifiers or None. """
    credentials = self._get_credentials(request)
    if not credentials:
        return None
    # The username extracted from the header takes precedence over the
    # one passed in.
    username, api_key = credentials
    if self.check:
        return self.check(username, api_key, request)
    return None
|
Having :username: return user's identifiers or None.
|
entailment
|
def _get_credentials(self, request):
    """ Extract username and api key token from 'Authorization' header.

    Expected header form: ``Authorization: ApiKey <username>:<token>``.

    :param request: Current request instance.
    :return: Tuple of (username, api_key), or None when the header is
        missing or malformed.
    """
    authorization = request.headers.get('Authorization')
    if not authorization:
        return None
    try:
        authmeth, authbytes = authorization.split(' ', 1)
    except ValueError:  # not enough values to unpack
        return None
    if authmeth.lower() != 'apikey':
        return None
    # On Python 2, or when the header value arrives as raw bytes, decode
    # it: utf-8 first, then latin-1 as a lossless fallback.
    if six.PY2 or isinstance(authbytes, bytes):
        try:
            auth = authbytes.decode('utf-8')
        except UnicodeDecodeError:
            auth = authbytes.decode('latin-1')
    else:
        auth = authbytes
    try:
        # Split only on the first ':' so tokens may contain colons.
        username, api_key = auth.split(':', 1)
    except ValueError:  # not enough values to unpack
        return None
    return username, api_key
|
Extract username and api key token from 'Authorization' header
|
entailment
|
def _get_event_kwargs(view_obj):
    """ Helper function to get event kwargs.
    :param view_obj: Instance of View that processes the request.
    :returns dict: Containing event kwargs or None if events shouldn't
        be fired.
    """
    request = view_obj.request
    view_method = getattr(view_obj, request.action)
    # Events are silenced when either the view method or the view itself
    # carries a truthy `_silent` attribute.
    do_trigger = not (
        getattr(view_method, '_silent', False) or
        getattr(view_obj, '_silent', False))
    if do_trigger:
        event_kwargs = {
            'view': view_obj,
            'model': view_obj.Model,
            'fields': FieldData.from_dict(
                view_obj._json_params,
                view_obj.Model)
        }
        ctx = view_obj.context
        # Only pass the context as `instance` when it looks like a model
        # item (exposes pk_field) or is a DataProxy wrapper.
        if hasattr(ctx, 'pk_field') or isinstance(ctx, DataProxy):
            event_kwargs['instance'] = ctx
        return event_kwargs
    # Implicit None return: events should not be fired.
|
Helper function to get event kwargs.
:param view_obj: Instance of View that processes the request.
:returns dict: Containing event kwargs or None if events shouldn't
be fired.
|
entailment
|
def _get_event_cls(view_obj, events_map):
""" Helper function to get event class.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Found event class.
"""
request = view_obj.request
view_method = getattr(view_obj, request.action)
event_action = (
getattr(view_method, '_event_action', None) or
request.action)
return events_map[event_action]
|
Helper function to get event class.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Found event class.
|
entailment
|
def _trigger_events(view_obj, events_map, additional_kw=None):
    """ Common logic to trigger before/after events.
    :param view_obj: Instance of View that processes the request.
    :param events_map: Map of events from which event class should be
        picked.
    :returns: Instance of the triggered event, or None when events are
        silenced.
    """
    event_kwargs = _get_event_kwargs(view_obj)
    if event_kwargs is None:
        # Events are silenced for this view/method.
        return None
    event_kwargs.update(additional_kw or {})
    event_cls = _get_event_cls(view_obj, events_map)
    event = event_cls(**event_kwargs)
    view_obj.request.registry.notify(event)
    return event
|
Common logic to trigger before/after events.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Instance if triggered event.
|
entailment
|
def subscribe_to_events(config, subscriber, events, model=None):
    """ Helper function to subscribe to a group of events.
    :param config: Pyramid config instance.
    :param subscriber: Event subscriber function.
    :param events: Sequence of events to subscribe to.
    :param model: Model predicate value.
    """
    predicate_kwargs = {} if model is None else {'model': model}
    for event in events:
        config.add_subscriber(subscriber, event, **predicate_kwargs)
|
Helper function to subscribe to group of events.
:param config: Pyramid config instance.
:param subscriber: Event subscriber function.
:param events: Sequence of events to subscribe to.
:param model: Model predicate value.
|
entailment
|
def add_field_processors(config, processors, model, field):
    """ Add processors for model field.

    Under the hood, a regular nefertari event subscriber is created which
    calls field processors in the order passed to this function.
    Processors are passed the following params:

    * **new_value**: New value of field.
    * **instance**: Instance affected by request. Is None when set of
      items is updated in bulk and when item is created.
    * **field**: Instance of nefertari.utils.data.FieldData instance
      containing data of changed field.
    * **request**: Current Pyramid Request instance.
    * **model**: Model class affected by request.
    * **event**: Underlying event object.

    Each processor must return the processed value, which is passed to
    the next processor.

    :param config: Pyramid Configurator instance.
    :param processors: Sequence of processor functions.
    :param model: Model class whose field the processors are registered
        for.
    :param field: Field name for which processors are registered.
    """
    before_change_events = (
        BeforeCreate,
        BeforeUpdate,
        BeforeReplace,
        BeforeUpdateMany,
        BeforeRegister,
    )
    def wrapper(event, _processors=processors, _field=field):
        # Defaults on `_processors`/`_field` bind the current values at
        # registration time (avoids the late-binding closure pitfall).
        proc_kw = {
            'new_value': event.field.new_value,
            'instance': event.instance,
            'field': event.field,
            'request': event.view.request,
            'model': event.model,
            'event': event,
        }
        # Chain processors: each receives the previous one's output.
        for proc_func in _processors:
            proc_kw['new_value'] = proc_func(**proc_kw)
        event.field.new_value = proc_kw['new_value']
        event.set_field_value(_field, proc_kw['new_value'])
    for evt in before_change_events:
        config.add_subscriber(wrapper, evt, model=model, field=field)
|
Add processors for model field.
Under the hood, a regular nefertari event subscriber is created which
calls field processors in order passed to this function.
Processors are passed following params:
* **new_value**: New value of of field.
* **instance**: Instance affected by request. Is None when set of
items is updated in bulk and when item is created.
* **field**: Instance of nefertari.utils.data.FieldData instance
containing data of changed field.
* **request**: Current Pyramid Request instance.
* **model**: Model class affected by request.
* **event**: Underlying event object.
Each processor must return processed value which is passed to next
processor.
:param config: Pyramid Configurator instance.
:param processors: Sequence of processor functions.
:param model: Model class whose field the processors are
registered.
:param field: Field name for which processors are registered.
|
entailment
|
def set_field_value(self, field_name, value):
    """ Set value of request field named `field_name`.

    Use this method to apply changes to the object affected by the
    request; values are stored on `view._json_params`. When `field_name`
    is not already part of the request, it is registered in `self.fields`
    so that field processors connected to it still fire (if they run
    after this call).

    :param field_name: Name of the request field to set.
    :param value: Value to be set.
    """
    self.view._json_params[field_name] = value
    if field_name in self.fields:
        # Field already tracked by the request: update it in place.
        self.fields[field_name].new_value = value
    else:
        # Register the new field so its processors get triggered too.
        self.fields.update(FieldData.from_dict({field_name: value}, self.model))
|
Set value of request field named `field_name`.
Use this method to apply changes to object which is affected
by request. Values are set on `view._json_params` dict.
If `field_name` is not affected by request, it is added to
`self.fields` which makes field processors which are connected
to `field_name` to be triggered, if they are run after this
method call(connected to events after handler that performs
method call).
:param field_name: Name of request field value of which should
be set.
:param value: Value to be set.
|
entailment
|
def set_field_value(self, field_name, value):
    """ Set value of response field named `field_name`.

    A single-item response has its field set directly; a collection
    response (items under 'data') has every item edited. To edit
    response meta (e.g. 'count') edit `event.response` directly.

    :param field_name: Name of the response field to set.
    :param value: Value to be set.
    """
    if self.response is None:
        return
    # Collection responses wrap items under 'data'; otherwise the
    # response itself is the single item.
    items = self.response['data'] if 'data' in self.response else [self.response]
    for item in items:
        item[field_name] = value
|
Set value of response field named `field_name`.
If response contains single item, its field is set.
If response contains multiple items, all the items in response
are edited.
To edit response meta(e.g. 'count') edit response directly at
`event.response`.
:param field_name: Name of response field value of which should
be set.
:param value: Value to be set.
|
entailment
|
def process_fields_param(fields):
    """ Process the 'fields' ES parameter.

    * A comma-separated string is split into a list.
    * '_type' is appended when missing so its actual value is displayed
      instead of 'None'.
    """
    if not fields:
        # Nothing requested: hand the falsy value back unchanged.
        return fields
    requested = split_strip(fields) if isinstance(fields, six.string_types) else fields
    if '_type' not in requested:
        requested.append('_type')
    return {'_source_include': requested, '_source': True}
|
Process 'fields' ES param.
* Fields list is split if needed
* '_type' field is added, if not present, so the actual value is
displayed instead of 'None'
|
entailment
|
def _catch_index_error(self, response):
""" Catch and raise index errors which are not critical and thus
not raised by elasticsearch-py.
"""
code, headers, raw_data = response
if not raw_data:
return
data = json.loads(raw_data)
if not data or not data.get('errors'):
return
try:
error_dict = data['items'][0]['index']
message = error_dict['error']
except (KeyError, IndexError):
return
raise exception_response(400, detail=message)
|
Catch and raise index errors which are not critical and thus
not raised by elasticsearch-py.
|
entailment
|
def setup_mappings(cls, force=False):
""" Setup ES mappings for all existing models.
This method is meant to be run once at application lauch.
ES._mappings_setup flag is set to not run make mapping creation
calls on subsequent runs.
Use `force=True` to make subsequent calls perform mapping
creation calls to ES.
"""
if getattr(cls, '_mappings_setup', False) and not force:
log.debug('ES mappings have been already set up for currently '
'running application. Call `setup_mappings` with '
'`force=True` to perform mappings set up again.')
return
log.info('Setting up ES mappings for all existing models')
models = engine.get_document_classes()
try:
for model_name, model_cls in models.items():
if getattr(model_cls, '_index_enabled', False):
es = cls(model_cls.__name__)
es.put_mapping(body=model_cls.get_es_mapping())
except JHTTPBadRequest as ex:
raise Exception(ex.json['extra']['data'])
cls._mappings_setup = True
|
Setup ES mappings for all existing models.
This method is meant to be run once at application lauch.
ES._mappings_setup flag is set to not run make mapping creation
calls on subsequent runs.
Use `force=True` to make subsequent calls perform mapping
creation calls to ES.
|
entailment
|
def process_chunks(self, documents, operation):
""" Apply `operation` to chunks of `documents` of size
`self.chunk_size`.
"""
chunk_size = self.chunk_size
start = end = 0
count = len(documents)
while count:
if count < chunk_size:
chunk_size = count
end += chunk_size
bulk = documents[start:end]
operation(documents_actions=bulk)
start += chunk_size
count -= chunk_size
|
Apply `operation` to chunks of `documents` of size
`self.chunk_size`.
|
entailment
|
def index_missing_documents(self, documents, request=None):
""" Index documents that are missing from ES index.
Determines which documents are missing using ES `mget` call which
returns a list of document IDs as `documents`. Then missing
`documents` from that list are indexed.
"""
log.info('Trying to index documents of type `{}` missing from '
'`{}` index'.format(self.doc_type, self.index_name))
if not documents:
log.info('No documents to index')
return
query_kwargs = dict(
index=self.index_name,
doc_type=self.doc_type,
fields=['_id'],
body={'ids': [d['_pk'] for d in documents]},
)
try:
response = self.api.mget(**query_kwargs)
except IndexNotFoundException:
indexed_ids = set()
else:
indexed_ids = set(
d['_id'] for d in response['docs'] if d.get('found'))
documents = [d for d in documents if str(d['_pk']) not in indexed_ids]
if not documents:
log.info('No documents of type `{}` are missing from '
'index `{}`'.format(self.doc_type, self.index_name))
return
self._bulk('index', documents, request)
|
Index documents that are missing from ES index.
Determines which documents are missing using ES `mget` call which
returns a list of document IDs as `documents`. Then missing
`documents` from that list are indexed.
|
entailment
|
def aggregate(self, **params):
""" Perform aggreration
Arguments:
:_aggregations_params: Dict of aggregation params. Root key is an
aggregation name. Required.
:_raise_on_empty: Boolean indicating whether to raise exception
when IndexNotFoundException exception happens. Optional,
defaults to False.
"""
_aggregations_params = params.pop('_aggregations_params', None)
_raise_on_empty = params.pop('_raise_on_empty', False)
if not _aggregations_params:
raise Exception('Missing _aggregations_params')
# Set limit so ES won't complain. It is ignored in the end
params['_limit'] = 0
search_params = self.build_search_params(params)
search_params.pop('size', None)
search_params.pop('from_', None)
search_params.pop('sort', None)
search_params['body']['aggregations'] = _aggregations_params
log.debug('Performing aggregation: {}'.format(_aggregations_params))
try:
response = self.api.search(**search_params)
except IndexNotFoundException:
if _raise_on_empty:
raise JHTTPNotFound(
'Aggregation failed: Index does not exist')
return {}
try:
return response['aggregations']
except KeyError:
raise JHTTPNotFound('No aggregations returned from ES')
|
Perform aggreration
Arguments:
:_aggregations_params: Dict of aggregation params. Root key is an
aggregation name. Required.
:_raise_on_empty: Boolean indicating whether to raise exception
when IndexNotFoundException exception happens. Optional,
defaults to False.
|
entailment
|
def bulk_index_relations(cls, items, request=None, **kwargs):
""" Index objects related to :items: in bulk.
Related items are first grouped in map
{model_name: {item1, item2, ...}} and then indexed.
:param items: Sequence of DB objects related objects if which
should be indexed.
:param request: Pyramid Request instance.
"""
index_map = defaultdict(set)
for item in items:
relations = item.get_related_documents(**kwargs)
for model_cls, related_items in relations:
indexable = getattr(model_cls, '_index_enabled', False)
if indexable and related_items:
index_map[model_cls.__name__].update(related_items)
for model_name, instances in index_map.items():
cls(model_name).index(to_dicts(instances), request=request)
|
Index objects related to :items: in bulk.
Related items are first grouped in map
{model_name: {item1, item2, ...}} and then indexed.
:param items: Sequence of DB objects related objects if which
should be indexed.
:param request: Pyramid Request instance.
|
entailment
|
def get_version(path="src/devpy/__init__.py"):
""" Return the version of by with regex intead of importing it"""
init_content = open(path, "rt").read()
pattern = r"^__version__ = ['\"]([^'\"]*)['\"]"
return re.search(pattern, init_content, re.M).group(1)
|
Return the version of by with regex intead of importing it
|
entailment
|
def add_plugin_arguments(self, parser):
"""Add plugin arguments to argument parser.
Parameters
----------
parser : argparse.ArgumentParser
The main haas ArgumentParser.
"""
for manager in self.hook_managers.values():
if len(list(manager)) == 0:
continue
manager.map(self._add_hook_extension_arguments, parser)
for namespace, manager in self.driver_managers.items():
choices = list(sorted(manager.names()))
if len(choices) == 0:
continue
option, dest = self._namespace_to_option(namespace)
parser.add_argument(
option, help=self._help[namespace], dest=dest,
choices=choices, default='default')
option_prefix = '{0}-'.format(option)
dest_prefix = '{0}_'.format(dest)
manager.map(self._add_driver_extension_arguments,
parser, option_prefix, dest_prefix)
|
Add plugin arguments to argument parser.
Parameters
----------
parser : argparse.ArgumentParser
The main haas ArgumentParser.
|
entailment
|
def get_enabled_hook_plugins(self, hook, args, **kwargs):
"""Get enabled plugins for specified hook name.
"""
manager = self.hook_managers[hook]
if len(list(manager)) == 0:
return []
return [
plugin for plugin in manager.map(
self._create_hook_plugin, args, **kwargs)
if plugin is not None
]
|
Get enabled plugins for specified hook name.
|
entailment
|
def get_driver(self, namespace, parsed_args, **kwargs):
"""Get mutually-exlusive plugin for plugin namespace.
"""
option, dest = self._namespace_to_option(namespace)
dest_prefix = '{0}_'.format(dest)
driver_name = getattr(parsed_args, dest, 'default')
driver_extension = self.driver_managers[namespace][driver_name]
return driver_extension.plugin.from_args(
parsed_args, dest_prefix, **kwargs)
|
Get mutually-exlusive plugin for plugin namespace.
|
entailment
|
def get_description(self):
"""
Get transaction description (for logging purposes)
"""
if self.card:
card_description = self.card.get_description()
else:
card_description = 'Cardless'
if card_description:
card_description += ' | '
return card_description + self.description if self.description else card_description + self.type + ' ' + str(self.IsoMessage.FieldData(11))
|
Get transaction description (for logging purposes)
|
entailment
|
def set_amount(self, amount):
"""
Set transaction amount
"""
if amount:
try:
self.IsoMessage.FieldData(4, int(amount))
except ValueError:
self.IsoMessage.FieldData(4, 0)
self.rebuild()
|
Set transaction amount
|
entailment
|
def set_expected_action(self, expected_response_action):
"""
Expected outcome of the transaction ('APPROVED' or 'DECLINED')
"""
if expected_response_action.upper() not in ['APPROVED', 'APPROVE', 'DECLINED', 'DECLINE']:
return False
self.expected_response_action = expected_response_action.upper()
return True
|
Expected outcome of the transaction ('APPROVED' or 'DECLINED')
|
entailment
|
def build_emv_data(self):
"""
TODO:
95 TVR
82 app_int_prof
"""
emv_data = ''
emv_data += self.TLV.build({'82': self._get_app_interchange_profile()})
emv_data += self.TLV.build({'9A': get_date()})
emv_data += self.TLV.build({'95': self.term.get_tvr()})
emv_data += self.TLV.build({'9F10': self.card.get_iss_application_data()})
emv_data += self.TLV.build({'9F26': self.card.get_application_cryptogram()})
emv_data += self.TLV.build({'9F36': self.card.get_transaction_counter()})
emv_data += self.TLV.build({'9F37': self.term.get_unpredno()})
emv_data += self.TLV.build({'9F1A': self.term.get_country_code()})
return emv_data
|
TODO:
95 TVR
82 app_int_prof
|
entailment
|
def set_currency(self, currency_id):
"""
Set transaction currency code from given currency id, e.g. set 840 from 'USD'
"""
try:
self.currency = currency_codes[currency_id]
self.IsoMessage.FieldData(49, self.currency)
self.rebuild()
except KeyError:
self.currency = None
|
Set transaction currency code from given currency id, e.g. set 840 from 'USD'
|
entailment
|
def finalize(self):
"""Output the aggregate block count results."""
for name, count in sorted(self.blocks.items(), key=lambda x: x[1]):
print('{:3} {}'.format(count, name))
print('{:3} total'.format(sum(self.blocks.values())))
|
Output the aggregate block count results.
|
entailment
|
def analyze(self, scratch, **kwargs):
"""Run and return the results from the BlockCounts plugin."""
file_blocks = Counter()
for script in self.iter_scripts(scratch):
for name, _, _ in self.iter_blocks(script.blocks):
file_blocks[name] += 1
self.blocks.update(file_blocks) # Update the overall count
return {'types': file_blocks}
|
Run and return the results from the BlockCounts plugin.
|
entailment
|
def analyze(self, scratch, **kwargs):
"""Run and return the results form the DeadCode plugin.
The variable_event indicates that the Scratch file contains at least
one instance of a broadcast event based on a variable. When
variable_event is True, dead code scripts reported by this plugin that
begin with a "when I receive" block may not actually indicate dead
code.
"""
self.total_instances += 1
sprites = {}
for sprite, script in self.iter_sprite_scripts(scratch):
if not script.reachable:
sprites.setdefault(sprite, []).append(script)
if sprites:
self.dead_code_instances += 1
import pprint
pprint.pprint(sprites)
variable_event = any(True in self.get_broadcast_events(x) for x in
self.iter_scripts(scratch))
return {'dead_code': {'sprites': sprites,
'variable_event': variable_event}}
|
Run and return the results form the DeadCode plugin.
The variable_event indicates that the Scratch file contains at least
one instance of a broadcast event based on a variable. When
variable_event is True, dead code scripts reported by this plugin that
begin with a "when I receive" block may not actually indicate dead
code.
|
entailment
|
def finalize(self):
"""Output the number of instances that contained dead code."""
if self.total_instances > 1:
print('{} of {} instances contained dead code.'
.format(self.dead_code_instances, self.total_instances))
|
Output the number of instances that contained dead code.
|
entailment
|
def show_help(name):
"""
Show help and basic usage
"""
print('Usage: python3 {} [OPTIONS]... '.format(name))
print('ISO8583 message client')
print(' -v, --verbose\t\tRun transactions verbosely')
print(' -p, --port=[PORT]\t\tTCP port to connect to, 1337 by default')
print(' -s, --server=[IP]\t\tIP of the ISO host to connect to, 127.0.0.1 by default')
print(' -t, --terminal=[ID]\t\tTerminal ID (used in DE 41 ISO field, 10001337 by default)')
print(' -m, --merchant=[ID]\t\tMerchant ID (used in DE 42 ISO field, 999999999999001 by default)')
print(' -k, --terminal-key=[KEY]\t\tTerminal key (\'DEADBEEF DEADBEEF DEADBEEF DEADBEEF\' by default)')
print(' -K, --master-key=[KEY]\t\Master key (\'ABABABAB CDCDCDCD EFEFEFEF AEAEAEAE\' by default)')
print(' -f, --file=[file.xml]\t\tUse transaction data from the given XML-file')
|
Show help and basic usage
|
entailment
|
def _run_interactive(self):
"""
Run transactions interactively (by asking user which transaction to run)
"""
self.term.connect()
self._show_available_transactions()
while True:
trxn_type = self._user_input('\nEnter transaction to send: ')
trxn = ''
data = ''
if trxn_type == 'e':
trxn = Transaction('echo', self.card, self.term)
trxn.trace()
elif trxn_type == 'b':
trxn = Transaction('balance', self.card, self.term)
trxn.set_PIN(self._user_input('Enter PIN: '))
trxn.trace()
elif trxn_type == 'p':
default_amount = 20000
amount = self._user_input('Enter transaction amount ({} by default): '.format(default_amount))
if not amount:
amount = default_amount
trxn = Transaction('purchase', self.card, self.term)
trxn.set_PIN(self._user_input('Enter PIN: '))
trxn.set_amount(amount)
trxn.trace()
elif trxn_type == 'q':
break
else:
print('Unknown transaction. Available transactions are:')
self._show_available_transactions()
continue
self.term.send(trxn.get_data(), show_trace=verbosity)
data = self.term.recv(show_trace=verbosity)
IsoMessage = ISO8583(data[2:], IsoSpec1987BPC())
IsoMessage.Print()
self.term.close()
|
Run transactions interactively (by asking user which transaction to run)
|
entailment
|
def find_top_level_directory(start_directory):
"""Finds the top-level directory of a project given a start directory
inside the project.
Parameters
----------
start_directory : str
The directory in which test discovery will start.
"""
top_level = start_directory
while os.path.isfile(os.path.join(top_level, '__init__.py')):
top_level = os.path.dirname(top_level)
if top_level == os.path.dirname(top_level):
raise ValueError("Can't find top level directory")
return os.path.abspath(top_level)
|
Finds the top-level directory of a project given a start directory
inside the project.
Parameters
----------
start_directory : str
The directory in which test discovery will start.
|
entailment
|
def discover(self, start, top_level_directory=None, pattern='test*.py'):
"""Do test case discovery.
This is the top-level entry-point for test discovery.
If the ``start`` argument is a drectory, then ``haas`` will
discover all tests in the package contained in that directory.
If the ``start`` argument is not a directory, it is assumed to
be a package or module name and tests in the package or module
are loaded.
FIXME: This needs a better description.
Parameters
----------
start : str
The directory, package, module, class or test to load.
top_level_directory : str
The path to the top-level directoy of the project. This is
the parent directory of the project'stop-level Python
package.
pattern : str
The glob pattern to match the filenames of modules to search
for tests.
"""
logger.debug('Starting test discovery')
if os.path.isdir(start):
start_directory = start
return self.discover_by_directory(
start_directory, top_level_directory=top_level_directory,
pattern=pattern)
elif os.path.isfile(start):
start_filepath = start
return self.discover_by_file(
start_filepath, top_level_directory=top_level_directory)
else:
package_or_module = start
return self.discover_by_module(
package_or_module, top_level_directory=top_level_directory,
pattern=pattern)
|
Do test case discovery.
This is the top-level entry-point for test discovery.
If the ``start`` argument is a drectory, then ``haas`` will
discover all tests in the package contained in that directory.
If the ``start`` argument is not a directory, it is assumed to
be a package or module name and tests in the package or module
are loaded.
FIXME: This needs a better description.
Parameters
----------
start : str
The directory, package, module, class or test to load.
top_level_directory : str
The path to the top-level directoy of the project. This is
the parent directory of the project'stop-level Python
package.
pattern : str
The glob pattern to match the filenames of modules to search
for tests.
|
entailment
|
def discover_by_module(self, module_name, top_level_directory=None,
pattern='test*.py'):
"""Find all tests in a package or module, or load a single test case if
a class or test inside a module was specified.
Parameters
----------
module_name : str
The dotted package name, module name or TestCase class and
test method.
top_level_directory : str
The path to the top-level directoy of the project. This is
the parent directory of the project'stop-level Python
package.
pattern : str
The glob pattern to match the filenames of modules to search
for tests.
"""
# If the top level directory is given, the module may only be
# importable with that in the path.
if top_level_directory is not None and \
top_level_directory not in sys.path:
sys.path.insert(0, top_level_directory)
logger.debug('Discovering tests by module: module_name=%r, '
'top_level_directory=%r, pattern=%r', module_name,
top_level_directory, pattern)
try:
module, case_attributes = find_module_by_name(module_name)
except ImportError:
return self.discover_filtered_tests(
module_name, top_level_directory=top_level_directory,
pattern=pattern)
dirname, basename = os.path.split(module.__file__)
basename = os.path.splitext(basename)[0]
if len(case_attributes) == 0 and basename == '__init__':
# Discover in a package
return self.discover_by_directory(
dirname, top_level_directory, pattern=pattern)
elif len(case_attributes) == 0:
# Discover all in a module
return self._loader.load_module(module)
return self.discover_single_case(module, case_attributes)
|
Find all tests in a package or module, or load a single test case if
a class or test inside a module was specified.
Parameters
----------
module_name : str
The dotted package name, module name or TestCase class and
test method.
top_level_directory : str
The path to the top-level directoy of the project. This is
the parent directory of the project'stop-level Python
package.
pattern : str
The glob pattern to match the filenames of modules to search
for tests.
|
entailment
|
def discover_single_case(self, module, case_attributes):
"""Find and load a single TestCase or TestCase method from a module.
Parameters
----------
module : module
The imported Python module containing the TestCase to be
loaded.
case_attributes : list
A list (length 1 or 2) of str. The first component must be
the name of a TestCase subclass. The second component must
be the name of a method in the TestCase.
"""
# Find single case
case = module
loader = self._loader
for index, component in enumerate(case_attributes):
case = getattr(case, component, None)
if case is None:
return loader.create_suite()
elif loader.is_test_case(case):
rest = case_attributes[index + 1:]
if len(rest) > 1:
raise ValueError('Too many components in module path')
elif len(rest) == 1:
return loader.create_suite(
[loader.load_test(case, *rest)])
return loader.load_case(case)
# No cases matched, return empty suite
return loader.create_suite()
|
Find and load a single TestCase or TestCase method from a module.
Parameters
----------
module : module
The imported Python module containing the TestCase to be
loaded.
case_attributes : list
A list (length 1 or 2) of str. The first component must be
the name of a TestCase subclass. The second component must
be the name of a method in the TestCase.
|
entailment
|
def discover_by_directory(self, start_directory, top_level_directory=None,
pattern='test*.py'):
"""Run test discovery in a directory.
Parameters
----------
start_directory : str
The package directory in which to start test discovery.
top_level_directory : str
The path to the top-level directoy of the project. This is
the parent directory of the project'stop-level Python
package.
pattern : str
The glob pattern to match the filenames of modules to search
for tests.
"""
start_directory = os.path.abspath(start_directory)
if top_level_directory is None:
top_level_directory = find_top_level_directory(
start_directory)
logger.debug('Discovering tests in directory: start_directory=%r, '
'top_level_directory=%r, pattern=%r', start_directory,
top_level_directory, pattern)
assert_start_importable(top_level_directory, start_directory)
if top_level_directory not in sys.path:
sys.path.insert(0, top_level_directory)
tests = self._discover_tests(
start_directory, top_level_directory, pattern)
return self._loader.create_suite(list(tests))
|
Run test discovery in a directory.
Parameters
----------
start_directory : str
The package directory in which to start test discovery.
top_level_directory : str
The path to the top-level directoy of the project. This is
the parent directory of the project'stop-level Python
package.
pattern : str
The glob pattern to match the filenames of modules to search
for tests.
|
entailment
|
def discover_by_file(self, start_filepath, top_level_directory=None):
"""Run test discovery on a single file.
Parameters
----------
start_filepath : str
The module file in which to start test discovery.
top_level_directory : str
The path to the top-level directoy of the project. This is
the parent directory of the project'stop-level Python
package.
"""
start_filepath = os.path.abspath(start_filepath)
start_directory = os.path.dirname(start_filepath)
if top_level_directory is None:
top_level_directory = find_top_level_directory(
start_directory)
logger.debug('Discovering tests in file: start_filepath=%r, '
'top_level_directory=', start_filepath,
top_level_directory)
assert_start_importable(top_level_directory, start_directory)
if top_level_directory not in sys.path:
sys.path.insert(0, top_level_directory)
tests = self._load_from_file(
start_filepath, top_level_directory)
return self._loader.create_suite(list(tests))
|
Run test discovery on a single file.
Parameters
----------
start_filepath : str
The module file in which to start test discovery.
top_level_directory : str
The path to the top-level directoy of the project. This is
the parent directory of the project'stop-level Python
package.
|
entailment
|
def _set_options_headers(self, methods):
""" Set proper headers.
Sets following headers:
Allow
Access-Control-Allow-Methods
Access-Control-Allow-Headers
Arguments:
:methods: Sequence of HTTP method names that are value for
requested URI
"""
request = self.request
response = request.response
response.headers['Allow'] = ', '.join(sorted(methods))
if 'Access-Control-Request-Method' in request.headers:
response.headers['Access-Control-Allow-Methods'] = \
', '.join(sorted(methods))
if 'Access-Control-Request-Headers' in request.headers:
response.headers['Access-Control-Allow-Headers'] = \
'origin, x-requested-with, content-type'
return response
|
Set proper headers.
Sets following headers:
Allow
Access-Control-Allow-Methods
Access-Control-Allow-Headers
Arguments:
:methods: Sequence of HTTP method names that are value for
requested URI
|
entailment
|
def _get_handled_methods(self, actions_map):
""" Get names of HTTP methods that can be used at requested URI.
Arguments:
:actions_map: Map of actions. Must have the same structure as
self._item_actions and self._collection_actions
"""
methods = ('OPTIONS',)
defined_actions = []
for action_name in actions_map.keys():
view_method = getattr(self, action_name, None)
method_exists = view_method is not None
method_defined = view_method != self.not_allowed_action
if method_exists and method_defined:
defined_actions.append(action_name)
for action in defined_actions:
methods += actions_map[action]
return methods
|
Get names of HTTP methods that can be used at requested URI.
Arguments:
:actions_map: Map of actions. Must have the same structure as
self._item_actions and self._collection_actions
|
entailment
|
def item_options(self, **kwargs):
""" Handle collection OPTIONS request.
Singular route requests are handled a bit differently because
singular views may handle POST requests despite being registered
as item routes.
"""
actions = self._item_actions.copy()
if self._resource.is_singular:
actions['create'] = ('POST',)
methods = self._get_handled_methods(actions)
return self._set_options_headers(methods)
|
Handle collection OPTIONS request.
Singular route requests are handled a bit differently because
singular views may handle POST requests despite being registered
as item routes.
|
entailment
|
def collection_options(self, **kwargs):
""" Handle collection item OPTIONS request. """
methods = self._get_handled_methods(self._collection_actions)
return self._set_options_headers(methods)
|
Handle collection item OPTIONS request.
|
entailment
|
def wrap(self, func):
""" Wrap :func: to perform aggregation on :func: call.
Should be called with view instance methods.
"""
@six.wraps(func)
def wrapper(*args, **kwargs):
try:
return self.aggregate()
except KeyError:
return func(*args, **kwargs)
return wrapper
|
Wrap :func: to perform aggregation on :func: call.
Should be called with view instance methods.
|
entailment
|
def pop_aggregations_params(self):
""" Pop and return aggregation params from query string params.
Aggregation params are expected to be prefixed(nested under) by
any of `self._aggregations_keys`.
"""
from nefertari.view import BaseView
self._query_params = BaseView.convert_dotted(self.view._query_params)
for key in self._aggregations_keys:
if key in self._query_params:
return self._query_params.pop(key)
else:
raise KeyError('Missing aggregation params')
|
Pop and return aggregation params from query string params.
Aggregation params are expected to be prefixed(nested under) by
any of `self._aggregations_keys`.
|
entailment
|
def get_aggregations_fields(cls, params):
""" Recursively get values under the 'field' key.
Is used to get names of fields on which aggregations should be
performed.
"""
fields = []
for key, val in params.items():
if isinstance(val, dict):
fields += cls.get_aggregations_fields(val)
if key == 'field':
fields.append(val)
return fields
|
Recursively get values under the 'field' key.
Is used to get names of fields on which aggregations should be
performed.
|
entailment
|
def check_aggregations_privacy(self, aggregations_params):
""" Check per-field privacy rules in aggregations.
Privacy is checked by making sure user has access to the fields
used in aggregations.
"""
fields = self.get_aggregations_fields(aggregations_params)
fields_dict = dictset.fromkeys(fields)
fields_dict['_type'] = self.view.Model.__name__
try:
validate_data_privacy(self.view.request, fields_dict)
except wrappers.ValidationError as ex:
raise JHTTPForbidden(
'Not enough permissions to aggregate on '
'fields: {}'.format(ex))
|
Check per-field privacy rules in aggregations.
Privacy is checked by making sure user has access to the fields
used in aggregations.
|
entailment
|
def aggregate(self):
""" Perform aggregation and return response. """
from nefertari.elasticsearch import ES
aggregations_params = self.pop_aggregations_params()
if self.view._auth_enabled:
self.check_aggregations_privacy(aggregations_params)
self.stub_wrappers()
return ES(self.view.Model.__name__).aggregate(
_aggregations_params=aggregations_params,
**self._query_params)
|
Perform aggregation and return response.
|
entailment
|
def asdict(self, name, _type=None, _set=False):
"""
Turn this 'a:2,b:blabla,c:True,a:'d' to
{a:[2, 'd'], b:'blabla', c:True}
"""
if _type is None:
_type = lambda t: t
dict_str = self.pop(name, None)
if not dict_str:
return {}
_dict = {}
for item in split_strip(dict_str):
key, _, val = item.partition(':')
val = _type(val)
if key in _dict:
if isinstance(_dict[key], list):
_dict[key].append(val)
else:
_dict[key] = [_dict[key], val]
else:
_dict[key] = val
if _set:
self[name] = _dict
return _dict
|
Turn this 'a:2,b:blabla,c:True,a:'d' to
{a:[2, 'd'], b:'blabla', c:True}
|
entailment
|
def get_random_hex(length):
"""
Return random hex string of a given length
"""
if length <= 0:
return ''
return hexify(random.randint(pow(2, length*2), pow(2, length*4)))[0:length]
|
Return random hex string of a given length
|
entailment
|
def get_response(_code):
"""
Return xx1x response for xx0x codes (e.g. 0810 for 0800)
"""
if _code:
code = str(_code)
return code[:-2] + str(int(code[-2:-1]) + 1) + code[-1]
else:
return None
|
Return xx1x response for xx0x codes (e.g. 0810 for 0800)
|
entailment
|
def load_case(self, testcase):
"""Load a TestSuite containing all TestCase instances for all tests in
a TestCase subclass.
Parameters
----------
testcase : type
A subclass of :class:`unittest.TestCase`
"""
tests = [self.load_test(testcase, name)
for name in self.find_test_method_names(testcase)]
return self.create_suite(tests)
|
Load a TestSuite containing all TestCase instances for all tests in
a TestCase subclass.
Parameters
----------
testcase : type
A subclass of :class:`unittest.TestCase`
|
entailment
|
def load_module(self, module):
"""Create and return a test suite containing all cases loaded from the
provided module.
Parameters
----------
module : module
A module object containing ``TestCases``
"""
cases = self.get_test_cases_from_module(module)
suites = [self.load_case(case) for case in cases]
return self.create_suite(suites)
|
Create and return a test suite containing all cases loaded from the
provided module.
Parameters
----------
module : module
A module object containing ``TestCases``
|
entailment
|
def Field(self, field, Value = None):
'''
Add field to bitmap
'''
if Value == None:
try:
return self.__Bitmap[field]
except KeyError:
return None
elif Value == 1 or Value == 0:
self.__Bitmap[field] = Value
else:
raise ValueError
|
Add field to bitmap
|
entailment
|
def FieldData(self, field, Value = None):
'''
Add field data
'''
if Value == None:
try:
return self.__FieldData[field]
except KeyError:
return None
else:
if len(str(Value)) > self.__IsoSpec.MaxLength(field):
raise ValueError('Value length larger than field maximum ({0})'.format(self.__IsoSpec.MaxLength(field)))
self.Field(field, Value=1)
self.__FieldData[field] = Value
|
Add field data
|
entailment
|
def authenticated_userid(request):
"""Helper function that can be used in ``db_key`` to support `self`
as a collection key.
"""
user = getattr(request, 'user', None)
key = user.pk_field()
return getattr(user, key)
|
Helper function that can be used in ``db_key`` to support `self`
as a collection key.
|
entailment
|
def iter_blocks(block_list):
"""A generator for blocks contained in a block list.
Yields tuples containing the block name, the depth that the block was
found at, and finally a handle to the block itself.
"""
# queue the block and the depth of the block
queue = [(block, 0) for block in block_list
if isinstance(block, kurt.Block)]
while queue:
block, depth = queue.pop(0)
assert block.type.text
yield block.type.text, depth, block
for arg in block.args:
if hasattr(arg, '__iter__'):
queue[0:0] = [(x, depth + 1) for x in arg
if isinstance(x, kurt.Block)]
elif isinstance(arg, kurt.Block):
queue.append((arg, depth))
|
A generator for blocks contained in a block list.
Yields tuples containing the block name, the depth that the block was
found at, and finally a handle to the block itself.
|
entailment
|
def iter_scripts(scratch):
"""A generator for all scripts contained in a scratch file.
yields stage scripts first, then scripts for each sprite
"""
for script in scratch.stage.scripts:
if not isinstance(script, kurt.Comment):
yield script
for sprite in scratch.sprites:
for script in sprite.scripts:
if not isinstance(script, kurt.Comment):
yield script
|
A generator for all scripts contained in a scratch file.
yields stage scripts first, then scripts for each sprite
|
entailment
|
def iter_sprite_scripts(scratch):
    """A generator for all scripts contained in a scratch file.
    yields stage scripts first, then scripts for each sprite
    """
    # Pair each script with the name of the sprite (or 'Stage') owning it.
    owners = [('Stage', scratch.stage)]
    owners += [(sprite.name, sprite) for sprite in scratch.sprites]
    for owner_name, owner in owners:
        for script in owner.scripts:
            if not isinstance(script, kurt.Comment):
                yield (owner_name, script)
|
A generator for all scripts contained in a scratch file.
yields stage scripts first, then scripts for each sprite
|
entailment
|
def script_start_type(script):
    """Return the type of block the script begins with."""
    # Map the text of known hat blocks to their plugin constants; any
    # other starting block means the script has no hat.
    hat_types = {
        'when @greenFlag clicked': HairballPlugin.HAT_GREEN_FLAG,
        'when I receive %s': HairballPlugin.HAT_WHEN_I_RECEIVE,
        'when this sprite clicked': HairballPlugin.HAT_MOUSE,
        'when %s key pressed': HairballPlugin.HAT_KEY,
    }
    return hat_types.get(script[0].type.text, HairballPlugin.NO_HAT)
|
Return the type of block the script begins with.
|
entailment
|
def get_broadcast_events(cls, script):
    """Return a Counter of event-names that were broadcast.
    The Count will contain the key True if any of the broadcast blocks
    contain a parameter that is a variable.
    """
    tally = Counter()
    for text, _, blk in cls.iter_blocks(script):
        if 'broadcast %s' not in text:
            continue
        target = blk.args[0]
        if isinstance(target, kurt.Block):
            # A dynamic (variable) event name: counted under the key True.
            tally[True] += 1
        else:
            # Literal event names are compared case-insensitively.
            tally[target.lower()] += 1
    return tally
|
Return a Counter of event-names that were broadcast.
The Count will contain the key True if any of the broadcast blocks
contain a parameter that is a variable.
|
entailment
|
def tag_reachable_scripts(cls, scratch):
    """Tag each script with attribute reachable.

    The reachable attribute will be set false for any script that does not
    begin with a hat block. Additionally, any script that begins with a
    'when I receive' block whose event-name doesn't appear in a
    corresponding broadcast block is marked as unreachable.

    Mutates each script (adds ``reachable``) and sets
    ``scratch.hairball_prepared`` so repeated calls are no-ops.
    """
    if getattr(scratch, 'hairball_prepared', False):  # Only process once
        return
    reachable = set()
    # Maps lower-cased event name -> set of 'when I receive' scripts that
    # are still waiting for a reachable broadcaster of that event.
    untriggered_events = {}
    # Initial pass to find reachable and potentially reachable scripts
    for script in cls.iter_scripts(scratch):
        # NOTE(review): iter_scripts already filters kurt.Comment, so this
        # isinstance guard appears redundant but is kept for safety.
        if not isinstance(script, kurt.Comment):
            starting_type = cls.script_start_type(script)
            if starting_type == cls.NO_HAT:
                script.reachable = False
            elif starting_type == cls.HAT_WHEN_I_RECEIVE:
                # Value will be updated if reachable
                script.reachable = False
                message = script[0].args[0].lower()
                untriggered_events.setdefault(message, set()).add(script)
            else:
                # Green flag / click / key hats are directly reachable.
                script.reachable = True
                reachable.add(script)
    # Expand reachable states based on broadcast events: fixpoint loop —
    # each newly-reachable script may broadcast events that wake further
    # 'when I receive' scripts, which are then themselves processed.
    while reachable:
        for event in cls.get_broadcast_events(reachable.pop()):
            if event in untriggered_events:
                for script in untriggered_events.pop(event):
                    script.reachable = True
                    reachable.add(script)
    scratch.hairball_prepared = True
|
Tag each script with attribute reachable.
The reachable attribute will be set false for any script that does not
begin with a hat block. Additionally, any script that begins with a
'when I receive' block whose event-name doesn't appear in a
corresponding broadcast block is marked as unreachable.
|
entailment
|
def description(self):
    """Attribute that returns the plugin description from its docstring."""
    # Skip the first two docstring lines (summary + blank separator),
    # then join the remaining non-empty stripped lines into one string.
    body = self.__doc__.split('\n')[2:]
    stripped = [ln.strip() for ln in body]
    return ' '.join(part for part in stripped if part)
|
Attribute that returns the plugin description from its docstring.
|
entailment
|
def _process(self, scratch, filename, **kwargs):
    """Internal hook that marks reachable scripts before calling analyze.
    Returns data exactly as returned by the analyze method.
    """
    # Reachability tagging is idempotent, so always run it first.
    self.tag_reachable_scripts(scratch)
    result = self.analyze(scratch, filename=filename, **kwargs)
    return result
|
Internal hook that marks reachable scripts before calling analyze.
Returns data exactly as returned by the analyze method.
|
entailment
|
def close(self):
    """
    Finalises the compressed version of the spreadsheet. If you aren't using the context manager ('with' statement,
    you must call this manually, it is not triggered automatically like on a file object.
    :return: Nothing.
    """
    # Serialize the accumulated XML document and store it as the archive's
    # content entry, then close the zip to flush the central directory.
    xml_bytes = self.dom.toxml().encode("utf-8")
    self.zipf.writestr("content.xml", xml_bytes)
    self.zipf.close()
|
Finalises the compressed version of the spreadsheet. If you aren't using the context manager ('with' statement,
you must call this manually, it is not triggered automatically like on a file object.
:return: Nothing.
|
entailment
|
def writerow(self, cells):
    """
    Write a row of cells into the default sheet of the spreadsheet.
    :param cells: A list of cells (most basic Python types supported).
    :return: Nothing.
    """
    # Lazily create the default sheet on first write.
    sheet = self.default_sheet
    if sheet is None:
        sheet = self.new_sheet()
        self.default_sheet = sheet
    sheet.writerow(cells)
|
Write a row of cells into the default sheet of the spreadsheet.
:param cells: A list of cells (most basic Python types supported).
:return: Nothing.
|
entailment
|
def new_sheet(self, name=None, cols=None):
    """
    Create a new sheet in the spreadsheet and return it so content can be added.
    :param name: Optional name for the sheet.
    :param cols: Specify the number of columns, needed for compatibility in some cases
    :return: Sheet object
    """
    created = Sheet(self.dom, name, cols)
    # Track every sheet so close()/serialization can find them later.
    self.sheets.append(created)
    return created
|
Create a new sheet in the spreadsheet and return it so content can be added.
:param name: Optional name for the sheet.
:param cols: Specify the number of columns, needed for compatibility in some cases
:return: Sheet object
|
entailment
|
def _format_kwargs(func):
    """Decorator to handle formatting kwargs to the proper names expected by
    the associated function. Long-form keyword names (e.g. ``blank``,
    ``default``) are renamed to the short forms the wrapped function
    expects (``blk``, ``dft``, ...)."""
    # Long alias -> short canonical keyword name.
    _aliases = {
        "blank": "blk",
        "default": "dft",
        "header": "hdr",
        "help": "hlp",
        "message": "msg",
        "show": "shw",
        "valid": "vld",
    }
    @wraps(func)
    def wrapper(*args, **kwargs):
        for alias, short in _aliases.items():
            if alias in kwargs:
                kwargs[short] = kwargs.pop(alias)
        return func(*args, **kwargs)
    return wrapper
|
Decorator to handle formatting kwargs to the proper names expected by
the associated function. The formats dictionary string keys will be used as
expected function kwargs and the value list of strings will be renamed to
the associated key string.
|
entailment
|
def show_limit(entries, **kwargs):
    """Shows a menu but limits the number of entries shown at a time.
    Functionally equivalent to `show_menu()` with the `limit` parameter set.

    Pages through `entries` `limit` items at a time, injecting synthetic
    'next'/'prev' entries so the user can navigate between pages; any
    other selection is returned to the caller.
    """
    limit = kwargs.pop('limit', 5)
    if limit <= 0:
        # A non-positive limit disables pagination entirely.
        return show_menu(entries, **kwargs)
    istart = 0  # Index of group start.
    iend = limit  # Index of group end.
    dft = kwargs.pop('dft', None)
    if type(dft) == int:
        dft = str(dft)
    while True:
        # Clamp the window to the list bounds after paging.
        if iend > len(entries):
            iend = len(entries)
            istart = iend - limit
        if istart < 0:
            istart = 0
            iend = limit
        unext = len(entries) - iend  # Number of next entries.
        uprev = istart  # Number of previous entries.
        nnext = ""  # Name of 'next' menu entry.
        nprev = ""  # Name of 'prev' menu entry.
        dnext = ""  # Description of 'next' menu entry.
        dprev = ""  # Description of 'prev' menu entry.
        # Deep-copy so the injected next/prev entries never pollute the
        # caller's entry list.
        group = copy.deepcopy(entries[istart:iend])
        names = [i.name for i in group]
        if unext > 0:
            # Pick the first 'next' alias that doesn't clash with a real
            # entry name on this page.
            for i in ["n", "N", "next", "NEXT", "->", ">>", ">>>"]:
                if i not in names:
                    nnext = i
                    dnext = "Next %u of %u entries" % (unext, len(entries))
                    group.append(MenuEntry(nnext, dnext, None, None, None))
                    # NOTE(review): "n" is appended regardless of which
                    # alias was chosen — presumably so the default logic
                    # below can key off "n"; confirm intent.
                    names.append("n")
                    break
        if uprev > 0:
            for i in ["p", "P", "prev", "PREV", "<-", "<<", "<<<"]:
                if i not in names:
                    nprev = i
                    dprev = "Previous %u of %u entries" % (uprev, len(entries))
                    group.append(MenuEntry(nprev, dprev, None, None, None))
                    names.append("p")
                    break
        # Only honor the requested default when it is visible on this
        # page; otherwise default to 'next' (if present) to ease paging.
        tmpdft = None
        if dft != None:
            if dft not in names:
                if "n" in names:
                    tmpdft = "n"
            else:
                tmpdft = dft
        result = show_menu(group, dft=tmpdft, **kwargs)
        # NOTE(review): when no next/prev entry was injected, nnext/nprev
        # are "" — a result of "" would match here; assumed show_menu
        # never returns "" — confirm.
        if result == nnext or result == dnext:
            istart += limit
            iend += limit
        elif result == nprev or result == dprev:
            istart -= limit
            iend -= limit
        else:
            return result
|
Shows a menu but limits the number of entries shown at a time.
Functionally equivalent to `show_menu()` with the `limit` parameter set.
|
entailment
|
def show_menu(entries, **kwargs):
    """Shows a menu with the given list of `MenuEntry` items.
    **Params**:
    - header (str) - String to show above menu.
    - note (str) - String to show as a note below menu.
    - msg (str) - String to show below menu.
    - dft (str) - Default value if input is left blank.
    - compact (bool) - If true, the menu items will not be displayed
      [default: False].
    - returns (str) - Controls what part of the menu entry is returned,
      'func' returns function result [default: name].
    - limit (int) - If set, limits the number of menu entries show at a time
      [default: None].
    - fzf (bool) - If true, can enter FCHR at the menu prompt to search menu.
    """
    global _AUTO
    hdr = kwargs.get('hdr', "")
    note = kwargs.get('note', "")
    # 'fzf' is popped (not just read) so it is not forwarded to show_limit.
    fzf = kwargs.pop('fzf', True)
    compact = kwargs.get('compact', False)
    returns = kwargs.get('returns', "name")
    limit = kwargs.get('limit', None)
    dft = kwargs.get('dft', None)
    msg = []
    if limit:
        # Delegate to the paginated variant when a page size is requested.
        return show_limit(entries, **kwargs)
    def show_banner():
        # Render the menu header and (unless in auto mode) the entry list.
        banner = "-- MENU"
        if hdr:
            banner += ": " + hdr
        banner += " --"
        msg.append(banner)
        if _AUTO:
            return
        for i in entries:
            msg.append(" (%s) %s" % (i.name, i.desc))
    valid = [i.name for i in entries]
    if type(dft) == int:
        dft = str(dft)
    if dft not in valid:
        # Ignore a default that doesn't match any entry name.
        dft = None
    if not compact:
        show_banner()
    if note and not _AUTO:
        msg.append("[!] " + note)
    if fzf:
        # FCHR is the special input that triggers fuzzy search below.
        valid.append(FCHR)
    msg.append(QSTR + kwargs.get('msg', "Enter menu selection"))
    msg = os.linesep.join(msg)
    entry = None
    # Re-prompt until the choice resolves to one of the menu entries.
    while entry not in entries:
        choice = ask(msg, vld=valid, dft=dft, qstr=False)
        if choice == FCHR and fzf:
            try:
                from iterfzf import iterfzf
                choice = iterfzf(reversed(["%s\t%s" % (i.name, i.desc) for i in entries])).strip("\0").split("\t", 1)[0]
            except Exception:
                # Fuzzy search is best-effort; fall back to re-prompting.
                warn("Issue encountered during fzf search.")
        match = [i for i in entries if i.name == choice]
        if match:
            entry = match[0]
    if entry.func:
        fresult = run_func(entry)
        if "func" == returns:
            return fresult
    try:
        # Return the requested attribute of the entry, falling back to
        # its name if the attribute doesn't exist.
        return getattr(entry, returns)
    except Exception:
        return getattr(entry, "name")
|
Shows a menu with the given list of `MenuEntry` items.
**Params**:
- header (str) - String to show above menu.
- note (str) - String to show as a note below menu.
- msg (str) - String to show below menu.
- dft (str) - Default value if input is left blank.
- compact (bool) - If true, the menu items will not be displayed
[default: False].
- returns (str) - Controls what part of the menu entry is returned,
'func' returns function result [default: name].
- limit (int) - If set, limits the number of menu entries show at a time
[default: None].
- fzf (bool) - If true, can enter FCHR at the menu prompt to search menu.
|
entailment
|
def run_func(entry):
    """Runs the function associated with the given MenuEntry.

    Returns the function's result, or None when the entry has no
    function attached.
    """
    if not entry.func:
        return None
    # Normalize missing args/kwargs to empty containers and dispatch once.
    positional = entry.args or ()
    keyword = entry.krgs or {}
    return entry.func(*positional, **keyword)
|
Runs the function associated with the given MenuEntry.
|
entailment
|
def enum_menu(strs, menu=None, *args, **kwargs):
    """Enumerates the given list of strings into returned menu.
    **Params**:
    - menu (Menu) - Existing menu to append. If not provided, a new menu will
      be created (extra args/kwargs are forwarded to the Menu constructor).
    """
    target = menu if menu else Menu(*args, **kwargs)
    for entry in strs:
        target.enum(entry)
    return target
|
Enumerates the given list of strings into returned menu.
**Params**:
- menu (Menu) - Existing menu to append. If not provided, a new menu will
be created.
|
entailment
|
def ask(msg="Enter input", fmt=None, dft=None, vld=None, shw=True, blk=False, hlp=None, qstr=True):
    """Prompts the user for input and returns the given answer. Optionally
    checks if answer is valid.
    **Params**:
    - msg (str) - Message to prompt the user with.
    - fmt (func) - Function used to format user input.
    - dft (int|float|str) - Default value if input is left blank.
    - vld ([int|float|str|func]) - Valid input entries.
    - shw (bool) - If true, show the user's input as typed.
    - blk (bool) - If true, accept a blank string as valid input. Note that
      supplying a default value will disable accepting blank input.
    - hlp (str) - Extra help text shown when the user enters "?".
    - qstr (bool) - If true, prefix the prompt with the question marker.

    NOTE(review): if the caller passes `vld` as a list, it is mutated in
    place below (`vld.append(dft)`) — confirm callers don't reuse it.
    """
    global _AUTO
    def print_help():
        # Literal (non-callable) valid values are listed as-is; type and
        # predicate validators are rendered as placeholders.
        lst = [v for v in vld if not callable(v)]
        if blk:
            lst.remove("")
        for v in vld:
            if not callable(v):
                continue
            if int == v:
                lst.append("<int>")
            elif float == v:
                lst.append("<float>")
            elif str == v:
                lst.append("<str>")
            else:
                lst.append("(" + v.__name__ + ")")
        if lst:
            echo("[HELP] Valid input: %s" % (" | ".join([str(l) for l in lst])))
        if hlp:
            echo("[HELP] Extra notes: " + hlp)
        if blk:
            echo("[HELP] Input may be blank.")
    vld = vld or []
    hlp = hlp or ""
    # Allow a single validator to be passed without wrapping it in a list.
    if not hasattr(vld, "__iter__"):
        vld = [vld]
    if not hasattr(fmt, "__call__"):
        fmt = lambda x: x  # NOTE: Defaults to function that does nothing.
    msg = "%s%s" % (QSTR if qstr else "", msg)
    dft = fmt(dft) if dft != None else None  # Prevents showing [None] default.
    if dft != None:
        # Show the default in the prompt; a default makes it valid and
        # disables blank input (blank now means "use the default").
        msg += " [%s]" % (dft if type(dft) is str else repr(dft))
        vld.append(dft)
        blk = False
    if vld:
        # Sanitize valid inputs.
        vld = list(set([fmt(v) if fmt(v) else v for v in vld]))
        if blk and "" not in vld:
            vld.append("")
        # NOTE: The following fixes a Py3 related bug found in `0.8.1`.
        try: vld = sorted(vld)
        except: pass
    msg += ISTR
    ans = None
    # Loop until a valid (non-None) answer is produced.
    while ans is None:
        get_input = _input if shw else getpass
        ans = get_input(msg)
        if _AUTO:
            echo(ans)
        # "?" is reserved: show help and re-prompt.
        if "?" == ans:
            print_help()
            ans = None
            continue
        if "" == ans:
            if dft != None:
                # NOTE(review): dft was already formatted above, so fmt is
                # applied twice here — assumed fmt is idempotent; confirm.
                ans = dft if not fmt else fmt(dft)
                break
            if "" not in vld:
                ans = None
                continue
        try:
            ans = ans if not fmt else fmt(ans)
        except:
            # Formatting failures invalidate the answer and re-prompt.
            ans = None
        if vld:
            for v in vld:
                if type(v) is type and cast(ans, v) is not None:
                    # Type validator: coerce the answer to that type.
                    ans = cast(ans, v)
                    break
                elif hasattr(v, "__call__"):
                    # Predicate validator: accept if it returns truthy.
                    try:
                        if v(ans):
                            break
                    except:
                        pass
                elif ans in vld:
                    # NOTE(review): membership is tested against the whole
                    # vld list rather than `ans == v` — presumably
                    # equivalent in effect; confirm intent.
                    break
            else:
                # No validator accepted the answer; re-prompt.
                ans = None
    return ans
|
Prompts the user for input and returns the given answer. Optionally
checks if answer is valid.
**Params**:
- msg (str) - Message to prompt the user with.
- fmt (func) - Function used to format user input.
- dft (int|float|str) - Default value if input is left blank.
- vld ([int|float|str|func]) - Valid input entries.
- shw (bool) - If true, show the user's input as typed.
- blk (bool) - If true, accept a blank string as valid input. Note that
supplying a default value will disable accepting blank input.
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.