sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def is_owner(self, user):
"""
Checks if user is the owner of object
Parameters
----------
user: get_user_model() instance
Returns
-------
bool
Author
------
Himanshu Shankar (https://himanshus.com)
"""
# Anonymous/unauthenticated users can never own an object.
if user.is_authenticated:
# Compare primary keys, not object identity.
return self.created_by.id == user.id
return False | Checks if user is the owner of object
Parameters
----------
user: get_user_model() instance
Returns
-------
bool
Author
------
Himanshu Shankar (https://himanshus.com) | entailment |
def run(xml_report_dir, xml_report_filter='TEST-', html_report_path='.',
generate_exec_time_graphs=True, html_report_dir='report.th',
initial_java_heap_size=None, maximum_java_heap_size=None):
""" Use UnitTH to generate a test history report
Args:
xml_report_dir (:obj:`str`): Parent directory of XML reports of individual builds to generate a history report of
xml_report_filter (:obj:`str`, optional): Starts-with filter for individual reports with `xml_report_dir` that should
be included in the history report. Set `xml_report_filter` to '' to include all files/subdirectories in the history
report.
html_report_path (:obj:`str`, optional): Directory of HTML reports of individual builds (relative to XML directories of
individual builds)
generate_exec_time_graphs (:obj:`bool`, optional): Whether execution time graphs shall be generated
html_report_dir (:obj:`str`, optional): directory to store generated HTML history report
initial_java_heap_size (:obj:`str`, optional): initial Java heap size
maximum_java_heap_size (:obj:`str`, optional): maximum Java heap size
"""
# Build the java command line; UnitTH is configured via -D system properties.
cmd = []
cmd.append('java')
if initial_java_heap_size:
cmd.append('-Xms{}'.format(initial_java_heap_size))
if maximum_java_heap_size:
cmd.append('-Xmx{}'.format(maximum_java_heap_size))
cmd.append('-Dunitth.xml.report.filter={}'.format(xml_report_filter))
cmd.append('-Dunitth.html.report.path={}'.format(html_report_path))
# Java expects the literal strings 'true'/'false', hence the lower-cased bool.
cmd.append('-Dunitth.generate.exectimegraphs={}'.format('{}'.format(generate_exec_time_graphs).lower()))
cmd.append('-Dunitth.report.dir={}'.format(html_report_dir))
cmd.append('-jar')
# The jar path is quoted manually because the command is joined into one shell string.
cmd.append('"{}"'.format(resource_filename('unitth', 'lib/unitth/unitth.jar')))
cmd.append(xml_report_dir)
# NOTE(review): joined string + shell=True does not escape the other arguments;
# confirm all inputs are trusted before exposing this to external callers.
subprocess.check_call(' '.join(cmd), shell=True) | Use UnitTH to generate a test history report
Args:
xml_report_dir (:obj:`str`): Parent directory of XML reports of individual builds to generate a history report of
xml_report_filter (:obj:`str`, optional): Starts-with filter for individual reports with `xml_report_dir` that should
be included in the history report. Set `xml_report_filter` to '' to include all files/subdirectories in the history
report.
html_report_path (:obj:`str`, optional): Directory of HTML reports of individual builds (relative to XML directories of
individual builds)
generate_exec_time_graphs (:obj:`bool`, optional): Whether execution time graphs shall be generated
html_report_dir (:obj:`str`, optional): directory to store generated HTML history report
initial_java_heap_size (:obj:`str`, optional): initial Java heap size
maximum_java_heap_size (:obj:`str`, optional): maximum Java heap size | entailment |
def get_counter(data, base):
"""
See setCounters() / getCounters() methods in IJ source, ij/gui/PointRoi.java.
"""
# Read four consecutive bytes starting at `base`.
b0 = data[base]
b1 = data[base + 1]
b2 = data[base + 2]
b3 = data[base + 3]
# NOTE(review): b0 is read but never used here — confirm against the IJ format.
# Counter index lives in the last byte; position is a 16-bit big-endian value
# assembled from b1 (high byte) and b2 (low byte).
counter = b3
position = (b1 << 8) + b2
return counter, position | See setCounters() / getCounters() methods in IJ source, ij/gui/PointRoi.java. | entailment |
def get_delete(url, id, method, key=None):
"""
Get or delete, just change the method: "GET" or "DELETE".
"""
# Util is a project helper that performs the HTTP request and decodes the JSON body.
# The trailing "/" is appended to target the resource's canonical URL.
return Util(
url=url + id + "/",
method=method,
key=key,
).json_result() | Get or delete, just change the method: "GET" or "DELETE". | entailment |
def get_user(self, login):
""" http://confluence.jetbrains.net/display/YTD2/GET+user
"""
# Quote the login so non-ASCII / reserved characters are safe in the URL path.
return youtrack.User(self._get("/admin/user/" + urlquote(login.encode('utf8'))), self) | http://confluence.jetbrains.net/display/YTD2/GET+user | entailment |
def import_users(self, users):
""" Import users, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Users)
Example: importUsers([{'login':'vadim', 'fullName':'vadim', 'email':'eee@ss.com', 'jabber':'fff@fff.com'},
{'login':'maxim', 'fullName':'maxim', 'email':'aaa@ss.com', 'jabber':'www@fff.com'}])
"""
# Nothing to import for an empty list.
if len(users) <= 0:
return
# Only these attributes are forwarded; anything else in a user dict is ignored.
known_attrs = ('login', 'fullName', 'email', 'jabber')
xml = '<list>\n'
for u in users:
# quoteattr handles XML attribute escaping/quoting.
xml += '  <user ' + "".join(k + '=' + quoteattr(u[k]) + ' ' for k in u if k in known_attrs) + '/>\n'
xml += '</list>'
# TODO: convert response xml into python objects
# Encode to bytes once, just before the request body is sent.
if isinstance(xml, str):
    xml = xml.encode('utf-8')
return self._req_xml('PUT', '/import/users', xml, 400).toxml() | Import users, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Users)
Example: importUsers([{'login':'vadim', 'fullName':'vadim', 'email':'eee@ss.com', 'jabber':'fff@fff.com'},
{'login':'maxim', 'fullName':'maxim', 'email':'aaa@ss.com', 'jabber':'www@fff.com'}]) | entailment |
def import_links(self, links):
""" Import links, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Links)
Accepts result of getLinks()
Example: importLinks([{'login':'vadim', 'fullName':'vadim', 'email':'eee@ss.com', 'jabber':'fff@fff.com'},
{'login':'maxim', 'fullName':'maxim', 'email':'aaa@ss.com', 'jabber':'www@fff.com'}])
"""
xml = '<list>\n'
for l in links:
# ignore typeOutward and typeInward returned by getLinks()
xml += '  <link ' + "".join(attr + '=' + quoteattr(l[attr]) +
' ' for attr in l if attr not in ['typeInward', 'typeOutward']) + '/>\n'
xml += '</list>'
# TODO: convert response xml into python objects
res = self._req_xml('PUT', '/import/links', xml, 400)
# toxml() only exists on a parsed XML response; otherwise return the raw result.
return res.toxml() if hasattr(res, "toxml") else res | Import links, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Links)
Accepts result of getLinks()
Example: importLinks([{'login':'vadim', 'fullName':'vadim', 'email':'eee@ss.com', 'jabber':'fff@fff.com'},
{'login':'maxim', 'fullName':'maxim', 'email':'aaa@ss.com', 'jabber':'www@fff.com'}]) | entailment |
def import_issues(self, project_id, assignee_group, issues):
""" Import issues, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Issues)
Accepts return of getIssues()
Example: importIssues([{'numberInProject':'1', 'summary':'some problem', 'description':'some description',
'priority':'1',
'fixedVersion':['1.0', '2.0'],
'comment':[{'author':'yamaxim', 'text':'comment text', 'created':'1267030230127'}]},
{'numberInProject':'2', 'summary':'some problem', 'description':'some description',
'priority':'1'}])
"""
# Nothing to import for an empty list.
if len(issues) <= 0:
return
# Read-only / server-generated fields from getIssues() that must not be re-imported.
bad_fields = ['id', 'projectShortName', 'votes', 'commentsCount',
'historyUpdated', 'updatedByFullName', 'updaterFullName',
'reporterFullName', 'links', 'attachments', 'jiraId',
'entityId', 'tags', 'sprint']
# If time tracking is enabled, its spent-time field is also server-managed.
tt_settings = self.get_project_time_tracking_settings(project_id)
if tt_settings and tt_settings['Enabled'] and tt_settings['TimeSpentField']:
bad_fields.append(tt_settings['TimeSpentField'])
xml = '<issues>\n'
# Keep each issue's XML snippet keyed by its number for error reporting later.
issue_records = dict([])
for issue in issues:
record = ""
record += '  <issue>\n'
comments = None
# Issue objects expose comments via a method; plain dicts carry a 'comments' key.
if getattr(issue, "getComments", None):
comments = issue.get_comments()
for issue_attr in issue:
attr_value = issue[issue_attr]
if attr_value is None:
continue
# NOTE(review): .encode() turns these into bytes, which are then concatenated
# with str below — under Python 3 this looks like it would raise; confirm
# which interpreter / helper contract this code targets.
if isinstance(attr_value, str):
attr_value = attr_value.encode('utf-8')
if isinstance(issue_attr, str):
issue_attr = issue_attr.encode('utf-8')
if issue_attr == 'comments':
comments = attr_value
else:
# ignore bad fields from getIssue()
if issue_attr not in bad_fields:
record += '    <field name="' + issue_attr + '">\n'
# Multi-value fields (e.g. fixedVersion) emit one <value> per entry.
if isinstance(attr_value, list) or getattr(attr_value, '__iter__', False):
for v in attr_value:
if isinstance(v, str):
v = v.encode('utf-8')
record += '      <value>' + escape(v.strip()) + '</value>\n'
else:
record += '      <value>' + escape(attr_value.strip()) + '</value>\n'
record += '    </field>\n'
if comments:
for comment in comments:
record += '    <comment'
for ca in comment:
val = comment[ca]
if isinstance(ca, str):
ca = ca.encode('utf-8')
if isinstance(val, str):
val = val.encode('utf-8')
record += ' ' + ca + '=' + quoteattr(val)
record += '/>\n'
record += '  </issue>\n'
xml += record
issue_records[issue.numberInProject] = record
xml += '</issues>'
# print xml
# TODO: convert response xml into python objects
if isinstance(xml, str):
xml = xml.encode('utf-8')
if isinstance(assignee_group, str):
assignee_group = assignee_group.encode('utf-8')
url = '/import/' + urlquote(project_id) + '/issues?' + urllib.parse.urlencode({'assigneeGroup': assignee_group})
if isinstance(url, str):
url = url.encode('utf-8')
result = self._req_xml('PUT', url, xml, 400)
# Empty result with a multi-issue batch: retry one issue at a time so a single
# bad issue does not sink the whole batch.
if (result == "") and (len(issues) > 1):
for issue in issues:
self.import_issues(project_id, assignee_group, [issue])
response = ""
try:
response = result.toxml().encode('utf-8')
except youtrack.YouTrackBroadException:
sys.stderr.write("can't parse response")
sys.stderr.write("request was")
sys.stderr.write(xml)
return response
# NOTE(review): everything below this point is unreachable — the function always
# returns on the line above. Confirm whether the per-item reporting was meant
# to run before that return.
item_elements = minidom.parseString(response).getElementsByTagName("item")
if len(item_elements) != len(issues):
sys.stderr.write(response)
else:
for item in item_elements:
_id = item.attributes["id"].value
imported = item.attributes["imported"].value.lower()
if imported == "true":
print("Issue [ %s-%s ] imported successfully" % (project_id, _id))
else:
sys.stderr.write("")
sys.stderr.write("Failed to import issue [ %s-%s ]." % (project_id, _id))
sys.stderr.write("Reason : ")
sys.stderr.write(item.toxml())
sys.stderr.write("Request was :")
if isinstance(issue_records[_id], str):
sys.stderr.write(issue_records[_id].encode('utf-8'))
else:
sys.stderr.write(issue_records[_id])
print("")
return response | Import issues, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Issues)
Accepts return of getIssues()
Example: importIssues([{'numberInProject':'1', 'summary':'some problem', 'description':'some description',
'priority':'1',
'fixedVersion':['1.0', '2.0'],
'comment':[{'author':'yamaxim', 'text':'comment text', 'created':'1267030230127'}]},
{'numberInProject':'2', 'summary':'some problem', 'description':'some description',
'priority':'1'}]) | entailment |
def get_project(self, project_id):
""" http://confluence.jetbrains.net/display/YTD2/GET+project
"""
# Fetch the project resource and wrap it in the youtrack.Project model.
return youtrack.Project(self._get("/admin/project/" + urlquote(project_id)), self) | http://confluence.jetbrains.net/display/YTD2/GET+project | entailment |
def create_subsystems(self, project_id, subsystems):
""" Accepts result of getSubsystems()
"""
# Create each subsystem individually via the single-item API.
for s in subsystems:
self.create_subsystem(project_id, s) | Accepts result of getSubsystems() | entailment |
def create_versions(self, project_id, versions):
""" Accepts result of getVersions()
"""
# Create each version individually via the single-item API.
for v in versions:
self.create_version(project_id, v) | Accepts result of getVersions() | entailment |
def run_cmd(call, cmd, *, echo=True, **kwargs):
"""Run a command and echo it first"""
# pipes.quote is for display only; the original argv list is passed to `call`.
if echo:
print('$> ' + ' '.join(map(pipes.quote, cmd)))
return call(cmd, **kwargs) | Run a command and echo it first | entailment |
def git_remote(git_repo):
"""Return the URL for remote git repository.
Depending on the system setup it returns ssh or https remote.
"""
# Prefer an HTTPS remote embedding the token when GITHUB_TOKEN is set (CI);
# fall back to the SSH remote for interactive use.
github_token = os.getenv(GITHUB_TOKEN_KEY)
if github_token:
return 'https://{0}@github.com/{1}'.format(
github_token, git_repo)
return 'git@github.com:{0}'.format(git_repo) | Return the URL for remote git repository.
Depending on the system setup it returns ssh or https remote. | entailment |
def render_build_args(options, ns):
"""Get docker build args dict, rendering any templated args.
Args:
options (dict):
The dictionary for a given image from chartpress.yaml.
Fields in `options['buildArgs']` will be rendered and returned,
if defined.
ns (dict): the namespace used when rendering templated arguments
"""
build_args = options.get('buildArgs', {})
# NOTE(review): build_args is the same dict object as options['buildArgs'],
# so the templates are overwritten in place — confirm callers don't need
# the raw templates afterwards.
for key, value in build_args.items():
build_args[key] = value.format(**ns)
return build_args | Get docker build args dict, rendering any templated args.
Args:
options (dict):
The dictionary for a given image from chartpress.yaml.
Fields in `options['buildArgs']` will be rendered and returned,
if defined.
ns (dict): the namespace used when rendering templated arguments | entailment |
def build_image(image_path, image_name, build_args=None, dockerfile_path=None):
"""Build an image
Args:
image_path (str): the path to the image directory
image_name (str): image 'name:tag' to build
build_args (dict, optional): dict of docker build arguments
dockerfile_path (str, optional):
path to dockerfile relative to image_path
if not `image_path/Dockerfile`.
"""
cmd = ['docker', 'build', '-t', image_name, image_path]
# NOTE(review): -f / --build-arg are appended after the context path; the docker
# CLI accepts interspersed flags — confirm for the minimum supported version.
if dockerfile_path:
cmd.extend(['-f', dockerfile_path])
for k, v in (build_args or {}).items():
cmd += ['--build-arg', '{}={}'.format(k, v)]
check_call(cmd) | Build an image
Args:
image_path (str): the path to the image directory
image_name (str): image 'name:tag' to build
build_args (dict, optional): dict of docker build arguments
dockerfile_path (str, optional):
path to dockerfile relative to image_path
if not `image_path/Dockerfile`. | entailment |
def image_needs_pushing(image):
"""Return whether an image needs pushing
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be pushed (not on registry)
False: if not (already present on registry)
"""
d = docker_client()
# get_registry_data queries the registry without pulling the image.
try:
d.images.get_registry_data(image)
except docker.errors.APIError:
# image not found on registry, needs pushing
# NOTE(review): APIError also covers auth/network failures, which would
# force a push — confirm that is acceptable.
return True
else:
return False | Return whether an image needs pushing
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be pushed (not on registry)
False: if not (already present on registry) | entailment |
def image_needs_building(image):
"""Return whether an image needs building
Checks if the image exists (ignores commit range),
either locally or on the registry.
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be built
False: if not (image already exists)
"""
d = docker_client()
# first, check for locally built image
try:
d.images.get(image)
except docker.errors.ImageNotFound:
# image not found, check registry
pass
else:
# it exists locally, no need to check remote
return False
# image may need building if it's not on the registry
return image_needs_pushing(image) | Return whether an image needs building
Checks if the image exists (ignores commit range),
either locally or on the registry.
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be built
False: if not (image already exists) | entailment |
def build_images(prefix, images, tag=None, commit_range=None, push=False, chart_version=None):
"""Build a collection of docker images
Args:
prefix (str): the prefix to add to images
images (dict): dict of image-specs from chartpress.yml
tag (str):
Specific tag to use instead of the last modified commit.
If unspecified the tag for each image will be the hash of the last commit
to modify the image's files.
commit_range (str):
The range of commits to consider, e.g. for building in CI.
If an image hasn't changed in the given range,
it will not be rebuilt.
push (bool):
Whether to push the resulting images (default: False).
chart_version (str):
The chart version, included as a prefix on image tags
if `tag` is not specified.
"""
# NOTE(review): commit_range is accepted but never used in this body — the
# "skip unchanged" behavior is implemented via registry/local existence checks.
value_modifications = {}
for name, options in images.items():
image_path = options.get('contextPath', os.path.join('images', name))
image_tag = tag
# include chartpress.yaml itself as it can contain build args and
# similar that influence the image that would be built
paths = list(options.get('paths', [])) + [image_path, 'chartpress.yaml']
last_commit = last_modified_commit(*paths)
if tag is None:
if chart_version:
image_tag = "{}-{}".format(chart_version, last_commit)
else:
image_tag = last_commit
image_name = prefix + name
image_spec = '{}:{}'.format(image_name, image_tag)
# Record the repository/tag pair to be written back into values.yaml.
value_modifications[options['valuesPath']] = {
'repository': image_name,
'tag': SingleQuotedScalarString(image_tag),
}
template_namespace = {
'LAST_COMMIT': last_commit,
'TAG': image_tag,
}
# An explicit tag forces a rebuild; otherwise build only when missing.
if tag or image_needs_building(image_spec):
build_args = render_build_args(options, template_namespace)
build_image(image_path, image_spec, build_args, options.get('dockerfilePath'))
else:
print(f"Skipping build for {image_spec}, it already exists")
if push:
if tag or image_needs_pushing(image_spec):
check_call([
'docker', 'push', image_spec
])
else:
print(f"Skipping push for {image_spec}, already on registry")
return value_modifications | Build a collection of docker images
Args:
prefix (str): the prefix to add to images
images (dict): dict of image-specs from chartpress.yml
tag (str):
Specific tag to use instead of the last modified commit.
If unspecified the tag for each image will be the hash of the last commit
to modify the image's files.
commit_range (str):
The range of commits to consider, e.g. for building in CI.
If an image hasn't changed in the given range,
it will not be rebuilt.
push (bool):
Whether to push the resulting images (default: False).
chart_version (str):
The chart version, included as a prefix on image tags
if `tag` is not specified. | entailment |
def build_values(name, values_mods):
"""Update name/values.yaml with modifications"""
values_file = os.path.join(name, 'values.yaml')
# `yaml` is presumably a ruamel.yaml round-trip instance (SingleQuotedScalarString
# is used elsewhere) so comments/quoting survive the rewrite — confirm.
with open(values_file) as f:
values = yaml.load(f)
for key, value in values_mods.items():
# Walk the dotted path (e.g. "hub.image") down into the values mapping.
parts = key.split('.')
mod_obj = values
for p in parts:
mod_obj = mod_obj[p]
print(f"Updating {values_file}: {key}: {value}")
if isinstance(mod_obj, MutableMapping):
keys = IMAGE_REPOSITORY_KEYS & mod_obj.keys()
if keys:
# NOTE(review): the inner loop reuses `key`, shadowing the outer
# values_mods key for the rest of this iteration — confirm intended.
for key in keys:
mod_obj[key] = value['repository']
else:
possible_keys = ' or '.join(IMAGE_REPOSITORY_KEYS)
raise KeyError(
f'Could not find {possible_keys} in {values_file}:{key}'
)
mod_obj['tag'] = value['tag']
else:
raise TypeError(
f'The key {key} in {values_file} must be a mapping.'
)
with open(values_file, 'w') as f:
yaml.dump(values, f) | Update name/values.yaml with modifications | entailment |
def build_chart(name, version=None, paths=None):
"""Update chart with specified version or last-modified commit in path(s)"""
chart_file = os.path.join(name, 'Chart.yaml')
with open(chart_file) as f:
chart = yaml.load(f)
if version is None:
if paths is None:
paths = ['.']
commit = last_modified_commit(*paths)
# Derive "<base>-<commit>" from the existing version, dropping any old suffix.
version = chart['version'].split('-')[0] + '-' + commit
chart['version'] = version
with open(chart_file, 'w') as f:
yaml.dump(chart, f)
return version | Update chart with specified version or last-modified commit in path(s) | entailment |
def publish_pages(name, paths, git_repo, published_repo, extra_message=''):
"""Publish helm chart index to github pages"""
version = last_modified_commit(*paths)
# Clone into a version-specific directory so concurrent runs don't collide.
checkout_dir = '{}-{}'.format(name, version)
# echo=False keeps the (possibly token-bearing) remote URL out of the log.
check_call([
'git', 'clone', '--no-checkout',
git_remote(git_repo), checkout_dir],
echo=False,
)
check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)
# package the latest version into a temporary directory
# and run helm repo index with --merge to update index.yaml
# without refreshing all of the timestamps
with TemporaryDirectory() as td:
check_call([
'helm', 'package', name,
'--destination', td + '/',
])
check_call([
'helm', 'repo', 'index', td,
'--url', published_repo,
'--merge', os.path.join(checkout_dir, 'index.yaml'),
])
# equivalent to `cp td/* checkout/`
# copies new helm chart and updated index.yaml
for f in os.listdir(td):
shutil.copy2(
os.path.join(td, f),
os.path.join(checkout_dir, f)
)
check_call(['git', 'add', '.'], cwd=checkout_dir)
if extra_message:
extra_message = '\n\n%s' % extra_message
else:
extra_message = ''
check_call([
'git',
'commit',
'-m', '[{}] Automatic update for commit {}{}'.format(name, version, extra_message)
], cwd=checkout_dir)
check_call(
['git', 'push', 'origin', 'gh-pages'],
cwd=checkout_dir,
) | Publish helm chart index to github pages | entailment |
def main():
"""Run chartpress"""
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument('--commit-range',
help='Range of commits to consider when building images')
argparser.add_argument('--push', action='store_true',
help='push built images to docker hub')
argparser.add_argument('--publish-chart', action='store_true',
help='publish updated chart to gh-pages')
argparser.add_argument('--tag', default=None,
help='Use this tag for images & charts')
argparser.add_argument('--extra-message', default='',
help='extra message to add to the commit message when publishing charts')
argparser.add_argument('--image-prefix', default=None,
help='override image prefix with this value')
args = argparser.parse_args()
with open('chartpress.yaml') as f:
config = yaml.load(f)
# Process each chart: bump its version, rebuild its images, rewrite values.yaml,
# and optionally publish to gh-pages.
for chart in config['charts']:
chart_paths = ['.'] + list(chart.get('paths', []))
version = args.tag
if version:
# version of the chart shouldn't have leading 'v' prefix
# if tag is of the form 'v1.2.3'
version = version.lstrip('v')
chart_version = build_chart(chart['name'], paths=chart_paths, version=version)
if 'images' in chart:
image_prefix = args.image_prefix if args.image_prefix is not None else chart['imagePrefix']
value_mods = build_images(
prefix=image_prefix,
images=chart['images'],
tag=args.tag,
commit_range=args.commit_range,
push=args.push,
# exclude `-<hash>` from chart_version prefix for images
chart_version=chart_version.split('-', 1)[0],
)
build_values(chart['name'], value_mods)
if args.publish_chart:
publish_pages(chart['name'],
paths=chart_paths,
git_repo=chart['repo']['git'],
published_repo=chart['repo']['published'],
extra_message=args.extra_message,
) | Run chartpress | entailment |
def create_custom_field(connection, cf_type, cf_name, auto_attached, value_names=None, bundle_policy="0"):
"""
Creates custom field prototype(if not exist) and sets default values bundle if needed
Args:
connection: An opened Connection instance.
cf_type: Type of custom field to be created
cf_name: Name of custom field that should be created (if not exists)
auto_attached: If this field should be auto attached or not.
value_names: Values, that should be attached with this cf by default.
If None, no bundle is created to this field, if empty, empty bundle is created.
bundle_policy: ???
Raises:
LogicException: If custom field already exists, but has wrong type.
YouTrackException: If something is wrong with queries.
"""
# Fast path: no bundle requested and the field either isn't auto-attached or
# isn't a bundle-backed ("[...]") type — just create the bare prototype.
if (value_names is None) and (not auto_attached or "[" not in cf_type):
_create_custom_field_prototype(connection, cf_type, cf_name, auto_attached)
return
if value_names is None:
value_names = set([])
else:
# De-duplicate requested values.
value_names = set(value_names)
field = _get_custom_field(connection, cf_name)
if field is not None:
if hasattr(field, "defaultBundle"):
# Reuse the field's existing default bundle.
bundle = connection.get_bundle(field.type, field.defaultBundle)
elif field.autoAttached:
return
else:
bundle = create_bundle_safe(connection, cf_name + "_bundle", cf_type)
else:
# No field yet: create a bundle and a prototype pointing at it.
bundle = create_bundle_safe(connection, cf_name + "_bundle", cf_type)
_create_custom_field_prototype(connection, cf_type, cf_name, auto_attached,
{"defaultBundle": bundle.name,
"attachBundlePolicy": bundle_policy})
for value_name in value_names:
try:
connection.add_value_to_bundle(bundle, value_name)
except YouTrackException:
# Value already present (or otherwise rejected) — best-effort add.
pass | Creates custom field prototype(if not exist) and sets default values bundle if needed
Args:
connection: An opened Connection instance.
cf_type: Type of custom field to be created
cf_name: Name of custom field that should be created (if not exists)
auto_attached: If this field should be auto attached or not.
value_names: Values, that should be attached with this cf by default.
If None, no bundle is created to this field, if empty, empty bundle is created.
bundle_policy: ???
Raises:
LogicException: If custom field already exists, but has wrong type.
YouTrackException: If something is wrong with queries. | entailment |
def process_custom_field(connection, project_id, cf_type, cf_name, value_names=None):
"""
Creates custom field and attaches it to the project. If custom field already exists and has type
cf_type it is attached to the project. If it has another type, LogicException is raised. If project field already
exists, uses it and bundle from it. If not, creates project field and bundle with name
<cf_name>_bundle_<project_id> for it.
Adds value_names to bundle.
Args:
connection: An opened Connection instance.
project_id: Id of the project to attach CF to.
cf_type: Type of cf to be created.
cf_name: Name of cf that should be created (if not exists) and attached to the project (if not yet attached)
value_names: Values, that cf must have. If None, does not create any bundle for the field. If empty list,
creates bundle, but does not create any value_names in it. If bundle already contains
some value_names, only value_names that do not already exist are added.
Raises:
LogicException: If custom field already exists, but has wrong type.
YouTrackException: If something is wrong with queries.
"""
_create_custom_field_prototype(connection, cf_type, cf_name)
# Non-bundle types (cf_type without a "[N]"-style suffix) cannot carry values.
if cf_type[0:-3] not in connection.bundle_types:
value_names = None
elif value_names is None:
value_names = []
existing_project_fields = [item for item in connection.getProjectCustomFields(project_id) if
utf8encode(item.name) == cf_name]
if len(existing_project_fields):
if value_names is None:
return
# Reuse the bundle already attached to the project field.
bundle = connection.getBundle(cf_type, existing_project_fields[0].bundle)
values_to_add = calculate_missing_value_names(bundle, value_names)
else:
if value_names is None:
connection.createProjectCustomFieldDetailed(project_id, cf_name, "No " + cf_name)
return
# Create a project-scoped bundle and attach the field to it.
bundle = create_bundle_safe(connection, cf_name + "_bundle_" + project_id, cf_type)
values_to_add = calculate_missing_value_names(bundle, value_names)
connection.createProjectCustomFieldDetailed(project_id, cf_name, "No " + cf_name,
params={"bundle": bundle.name})
for name in values_to_add:
connection.addValueToBundle(bundle, bundle.createElement(name)) | Creates custom field and attaches it to the project. If custom field already exists and has type
cf_type it is attached to the project. If it has another type, LogicException is raised. If project field already
exists, uses it and bundle from it. If not, creates project field and bundle with name
<cf_name>_bundle_<project_id> for it.
Adds value_names to bundle.
Args:
connection: An opened Connection instance.
project_id: Id of the project to attach CF to.
cf_type: Type of cf to be created.
cf_name: Name of cf that should be created (if not exists) and attached to the project (if not yet attached)
value_names: Values, that cf must have. If None, does not create any bundle for the field. If empty list,
creates bundle, but does not create any value_names in it. If bundle already contains
some value_names, only value_names that do not already exist are added.
Raises:
LogicException: If custom field already exists, but has wrong type.
YouTrackException: If something is wrong with queries. | entailment |
def add_values_to_bundle_safe(connection, bundle, values):
"""
Adds values to specified bundle. Checks, whether each value already contains in bundle. If yes, it is not added.
Args:
connection: An opened Connection instance.
bundle: Bundle instance to add values in.
values: Values, that should be added in bundle.
Raises:
YouTrackException: if something is wrong with queries.
"""
for value in values:
try:
connection.addValueToBundle(bundle, value)
except YouTrackException as e:
# 409 Conflict means the value already exists — expected, just log it.
if e.response.status == 409:
print("Value with name [ %s ] already exists in bundle [ %s ]" %
(utf8encode(value.name), utf8encode(bundle.name)))
else:
raise e | Adds values to specified bundle. Checks, whether each value already contains in bundle. If yes, it is not added.
Args:
connection: An opened Connection instance.
bundle: Bundle instance to add values in.
values: Values, that should be added in bundle.
Raises:
YouTrackException: if something is wrong with queries. | entailment |
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as YAML and returns the resulting data.
"""
# yaml is imported optionally at module level; fail loudly if it is absent.
assert yaml, 'YAMLParser requires pyyaml to be installed'
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
# safe_load avoids arbitrary object construction from untrusted input.
return yaml.safe_load(data)
except (ValueError, yaml.parser.ParserError) as exc:
raise ParseError('YAML parse error - %s' % six.text_type(exc)) | Parses the incoming bytestream as YAML and returns the resulting data. | entailment |
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders `data` into serialized YAML.
"""
assert yaml, 'YAMLRenderer requires pyyaml to be installed'
if data is None:
return ''
# Dumper/charset/flow-style are class-level configuration on the renderer.
return yaml.dump(
data,
stream=None,
encoding=self.charset,
Dumper=self.encoder,
allow_unicode=not self.ensure_ascii,
default_flow_style=self.default_flow_style
) | Renders `data` into serialized YAML. | entailment |
def create_closure_model(cls):
"""Creates a <Model>Closure model in the same module as the model."""
meta_vals = {
'unique_together': (("parent", "child"),)
}
# Mirror an explicit db_table on the source model with a "<table>closure" table.
if getattr(cls._meta, 'db_table', None):
meta_vals['db_table'] = '%sclosure' % getattr(cls._meta, 'db_table')
# Build the closure model dynamically with type(); __module__ keeps it in the
# same app module so Django registers it correctly.
# NOTE(review): ForeignKey without on_delete implies Django < 2.0 — confirm.
model = type('%sClosure' % cls.__name__, (models.Model,), {
'parent': models.ForeignKey(
cls.__name__,
related_name=cls.closure_parentref()
),
'child': models.ForeignKey(
cls.__name__,
related_name=cls.closure_childref()
),
'depth': models.IntegerField(),
'__module__': cls.__module__,
'__unicode__': _closure_model_unicode,
'Meta': type('Meta', (object,), meta_vals),
})
setattr(cls, "_closure_model", model)
return model | Creates a <Model>Closure model in the same module as the model. | entailment |
def _toplevel(cls):
"""Find the top level of the chain we're in.
For example, if we have:
C inheriting from B inheriting from A inheriting from ClosureModel
C._toplevel() will return A.
"""
# Intersect direct ClosureModel subclasses with this class's parents: the
# (single) match is the root of the inheritance chain.
superclasses = (
list(set(ClosureModel.__subclasses__()) &
set(cls._meta.get_parent_list()))
)
return next(iter(superclasses)) if superclasses else cls | Find the top level of the chain we're in.
For example, if we have:
C inheriting from B inheriting from A inheriting from ClosureModel
C._toplevel() will return A. | entailment |
def rebuildtable(cls):
"""Regenerate the entire closuretree."""
cls._closure_model.objects.all().delete()
# Recreate the self-links (depth 0) for every row in one bulk insert...
cls._closure_model.objects.bulk_create([cls._closure_model(
parent_id=x['pk'],
child_id=x['pk'],
depth=0
) for x in cls.objects.values("pk")])
# ...then rebuild ancestor/descendant links node by node.
for node in cls.objects.all():
node._closure_createlink() | Regenerate the entire closuretree. | entailment |
def _closure_parent_pk(self):
"""What our parent pk is in the closure tree."""
# Prefer the FK's "<attr>_id" shortcut so we don't fetch the parent object.
if hasattr(self, "%s_id" % self._closure_parent_attr):
return getattr(self, "%s_id" % self._closure_parent_attr)
else:
parent = getattr(self, self._closure_parent_attr)
return parent.pk if parent else None | What our parent pk is in the closure tree. | entailment |
def _closure_deletelink(self, oldparentpk):
"""Remove incorrect links from the closure tree."""
# Delete every closure row connecting an ancestor of the old parent to a
# descendant of self — i.e. all paths that went through the removed edge.
self._closure_model.objects.filter(
**{
"parent__%s__child" % self._closure_parentref(): oldparentpk,
"child__%s__parent" % self._closure_childref(): self.pk
}
).delete() | Remove incorrect links from the closure tree. | entailment |
def _closure_createlink(self):
"""Create a link in the closure tree."""
# Ancestors of our parent (including the parent's self-link)...
linkparents = self._closure_model.objects.filter(
child__pk=self._closure_parent_pk
).values("parent", "depth")
# ...crossed with descendants of self (including our self-link).
linkchildren = self._closure_model.objects.filter(
parent__pk=self.pk
).values("child", "depth")
# Depth is additive, +1 for the new parent->self edge.
newlinks = [self._closure_model(
parent_id=p['parent'],
child_id=c['child'],
depth=p['depth']+c['depth']+1
) for p in linkparents for c in linkchildren]
self._closure_model.objects.bulk_create(newlinks) | Create a link in the closure tree. | entailment |
def get_ancestors(self, include_self=False, depth=None):
    """Return a queryset of this node's ancestors, nearest first."""
    parentref = self._closure_parentref()
    if self.is_root_node():
        # A root has no ancestors; optionally include just ourselves.
        if include_self:
            # Filter on pk for efficiency.
            return self._toplevel().objects.filter(pk=self.pk)
        return self._toplevel().objects.none()
    lookup = {"%s__child" % parentref: self.pk}
    if depth is not None:
        lookup["%s__depth__lte" % parentref] = depth
    queryset = self._toplevel().objects.filter(**lookup)
    if not include_self:
        queryset = queryset.exclude(pk=self.pk)
    return queryset.order_by("%s__depth" % parentref)
def get_descendants(self, include_self=False, depth=None):
    """Return a queryset of this node's descendants, nearest first."""
    childref = self._closure_childref()
    lookup = {"%s__parent" % childref: self.pk}
    if depth is not None:
        lookup["%s__depth__lte" % childref] = depth
    queryset = self._toplevel().objects.filter(**lookup)
    if not include_self:
        queryset = queryset.exclude(pk=self.pk)
    return queryset.order_by("%s__depth" % childref)
def prepopulate(self, queryset):
    """Prepopulate a descendants query's children efficiently.

    Call like: blah.prepopulate(blah.get_descendants().select_related(stuff))
    Afterwards get_children() on any node in the subtree is served from
    the cache instead of hitting the database.
    """
    objs = list(queryset)
    # Index every fetched node (and ourselves) by primary key.
    hashobjs = {obj.pk: obj for obj in objs}
    hashobjs[self.pk] = self
    for node in hashobjs.values():
        node._cached_children = []
    for descendant in objs:
        # Every descendant's parent must be part of the fetched subtree.
        assert descendant._closure_parent_pk in hashobjs
        hashobjs[descendant._closure_parent_pk]._cached_children.append(descendant)
Call like: blah.prepopulate(blah.get_descendants().select_related(stuff)) | entailment |
def get_children(self):
    """Return this node's immediate children."""
    if not hasattr(self, '_cached_children'):
        return self.get_descendants(include_self=False, depth=1)
    # prepopulate() has run: serve the cached children, but still hand
    # back a queryset so the return type matches the uncached branch.
    queryset = self._toplevel().objects.filter(
        pk__in=[child.pk for child in self._cached_children]
    )
    queryset._result_cache = self._cached_children
    return queryset
def get_root(self):
    """Return the furthest ancestor (the root) of this node."""
    if self.is_root_node():
        return self
    deepest_first = "-%s__depth" % self._closure_parentref()
    return self.get_ancestors().order_by(deepest_first)[0]
def is_descendant_of(self, other, include_self=False):
    """Return True when `other` is an ancestor of this node.

    With ``include_self`` a node counts as its own descendant.
    """
    if self.pk == other.pk:
        return include_self
    links = self._closure_model.objects.filter(parent=other, child=self)
    return links.exclude(pk=self.pk).exists()
def is_ancestor_of(self, other, include_self=False):
    """Return True when this node is an ancestor of `other`.

    Simply the mirror image of ``is_descendant_of``.
    """
    return other.is_descendant_of(self, include_self=include_self)
def quantize(number, digits=0, q=builtins.round):
    """
    Quantize to somewhere in between a magnitude.
    For example:
    * ceil(55.25, 1.2) => 55.26
    * floor(55.25, 1.2) => 55.24
    * round(55.3333, 2.5) => 55.335
    * round(12.345, 1.1) == round(12.345, 2) == 12.34
    """
    base, fraction = split(digits)
    # Quantization beyond an order of magnitude can need an extra decimal
    # digit depending on the lowest common multiple,
    # e.g. floor(1.2341234, 1.25) = 1.225 but floor(1.2341234, 1.5) = 1.20
    extra = 2 if fraction * 10 % 1 > 0 else 1
    digits = base + extra
    multiplier = 10 ** base * invert(fraction, default=1)
    quantized = q(number * multiplier) / multiplier
    # One more rounding pass shaves off floating point math wonkiness.
    return builtins.round(quantized, digits)
For example:
* ceil(55.25, 1.2) => 55.26
* floor(55.25, 1.2) => 55.24
* round(55.3333, 2.5) => 55.335
* round(12.345, 1.1) == round(12.345, 2) == 12.34 | entailment |
def vectorize(fn):
    """Wrap `fn` so it maps itself over a list of values.

    The wrapped function takes a list, applies `fn` to each element
    (forwarding any extra positional/keyword arguments) and returns the
    list of results.
    """
    @functools.wraps(fn)
    def wrapper(values, *args, **kwargs):
        return [fn(item, *args, **kwargs) for item in values]
    return wrapper
with a single item of that list. | entailment |
def engineering(value, precision=3, prefix=False, prefixes=SI):
    """Convert a number to engineering notation (exponents in steps of 3)."""
    context = decimal.Context(prec=precision)
    normalized = decimal.Decimal(value).normalize(context=context)
    string = normalized.to_eng_string()
    if not prefix:
        return string
    # Swap each exponent marker (e.g. "E+3") for its SI prefix (e.g. "k").
    replacements = {e(exponent): symbol for exponent, symbol in prefixes.items()}
    return replace(string, replacements)
def business(values, precision=3, prefix=True, prefixes=SI, statistic=median, default=''):
    """
    Convert a list of numbers to the engineering notation appropriate to a
    reference point like the minimum, the median or the mean --
    think of it as "business notation".

    Any number will have at most the amount of significant digits of the
    reference point, that is, the function will round beyond the
    decimal point.

    For example, if the reference is `233K`, this function will turn
    1,175,125 into `1180K` and 11,234 into `11K` (instead of 1175K and
    11.2K respectively.) This can help enormously with readability.

    If the reference point is equal to or larger than E15 or
    equal to or smaller than E-15, E12 and E-12 become the
    reference point instead. (Petas and femtos are too
    unfamiliar to people to be easily comprehended.)
    """
    # NOTE(review): the `default` parameter appears unused in this body --
    # confirm against callers before removing it.
    reference = statistic(values)
    if not reference:
        # Degenerate input (reference is 0/NaN-free falsy): emit blanks.
        return upcast([''] * len(values), values)
    exponent = order(reference)
    # Snap to a multiple of 3 and clamp to [-12, 12] (pico .. tera).
    e = bound(exponent - exponent % 3, -12, 12)
    # the amount of decimals is the precision minus the amount of digits
    # before the decimal point, which is one more than the relative order
    # of magnitude (for example, 10^5 can be represented as 100K, with
    # those three digits representing place values of 10^3, 10^4 and 10^5)
    d = precision - (1 + exponent - e)
    prefix = prefixes[e]
    strings = []
    for value in values:
        if isnan(value):
            # NaN values render as empty strings.
            strings.append('')
        else:
            normalized = value / 10.0 ** e
            # use `round` for rounding (beyond the decimal point if necessary)
            # use string formatting for padding to the right amount of decimals
            # and to hide decimals when necessary (by default, floats are always
            # displayed with a single decimal place, to distinguish them from
            # integers)
            relative_order = order(value) - exponent
            places = min(d - relative_order, d)
            normalized = round(normalized, places)
            strings.append('{0:,.{1}f}'.format(normalized, d) + prefix)
return upcast(strings, values) | Convert a list of numbers to the engineering notation appropriate to a
reference point like the minimum, the median or the mean --
think of it as "business notation".
Any number will have at most the amount of significant digits of the
reference point, that is, the function will round beyond the
decimal point.
For example, if the reference is `233K`, this function will turn
1,175,125 into `1180K` and 11,234 into `11K` (instead of 1175K and
11.2K respectively.) This can help enormously with readability.
If the reference point is equal to or larger than E15 or
equal to or smaller than E-15, E12 and E-12 become the
reference point instead. (Petas and femtos are too
unfamiliar to people to be easily comprehended.) | entailment |
def chunked(iterator, chunksize):
    """
    Yield items from `iterator` in lists of size `chunksize`.

    The final chunk may be shorter when the iterator is exhausted.
    (The original doctest claimed tuples were yielded; the code has
    always yielded lists -- the example below matches the behavior.)

    >>> list(chunked([1, 2, 3, 4, 5], chunksize=2))
    [[1, 2], [3, 4], [5]]
    """
    chunk = []
    for idx, item in enumerate(iterator, 1):
        chunk.append(item)
        if idx % chunksize == 0:
            # A full chunk is ready; hand it out and start a fresh one.
            yield chunk
            chunk = []
    if chunk:
        # Leftover items that never filled a whole chunk.
        yield chunk
>>> list(chunked([1, 2, 3, 4, 5], chunksize=2))
[(1, 2), (3, 4), (5,)] | entailment |
def pre_start_check(self):
    """
    Check if process accepts connections.

    :return: True when a TCP connection to (self.host, self.port)
        succeeds, False otherwise.

    .. note::

        Process will be considered started, when it'll be able to accept
        TCP connections as defined in initializer.
    """
    # Create the socket *outside* the try block: if socket.socket() itself
    # failed inside it, the finally clause would hit an unbound `sock`
    # and raise UnboundLocalError instead of returning False.
    sock = socket.socket()
    try:
        sock.connect((self.host, self.port))
        return True
    except (socket.error, socket.timeout):
        return False
    finally:
        # close socket manually for sake of PyPy
        sock.close()
.. note::
Process will be considered started, when it'll be able to accept
TCP connections as defined in initializer. | entailment |
def after_start_check(self):
    """Check if defined URL returns expected status to a HEAD request.

    :return: True when the response status equals ``self.status`` or
        matches ``self.status_re``; False on mismatch or connection
        trouble.
    """
    try:
        conn = HTTPConnection(self.host, self.port)
        try:
            conn.request('HEAD', self.url.path)
            status = str(conn.getresponse().status)
            # Accept either an exact match or a regex match.
            return status == self.status or bool(self.status_re.match(status))
        finally:
            # Always release the connection -- the original leaked it
            # whenever the status did not match.
            conn.close()
    except (HTTPException, socket.timeout, socket.error):
        return False
def start(self):
    """
    Start process.

    :returns: itself
    :rtype: OutputExecutor

    .. note::

        Process will be considered started, when defined banner will appear
        in process output.
    """
    super(OutputExecutor, self).start()

    # get a polling object
    # NOTE(review): select.poll() is unavailable on Windows -- this
    # executor presumably targets POSIX platforms only; confirm.
    self.poll_obj = select.poll()
    # register a file descriptor
    # POLLIN because we will wait for data to read
    self.poll_obj.register(self.output(), select.POLLIN)
    try:
        # Block (via the base class timeout loop) until the banner shows up.
        self.wait_for(self._wait_for_output)
        # unregister the file descriptor and delete the polling object
        self.poll_obj.unregister(self.output())
    finally:
        # The poll object is only needed during startup.
        del self.poll_obj
return self | Start process.
:returns: itself
:rtype: OutputExecutor
.. note::
Process will be considered started, when defined banner will appear
in process output. | entailment |
def _wait_for_output(self):
"""
Check if output matches banner.
.. warning::
Waiting for I/O completion. It does not work on Windows. Sorry.
"""
# Here we should get an empty list or list with a tuple [(fd, event)]
# When we get list with a tuple we can use readline method on
# the file descriptor.
poll_result = self.poll_obj.poll(0)
if poll_result:
line = self.output().readline()
if self._banner.match(line):
return True
return False | Check if output matches banner.
.. warning::
Waiting for I/O completion. It does not work on Windows. Sorry. | entailment |
def construct_mail(recipients=None, context=None, template_base='emailit/email', subject=None, message=None, site=None,
                   subject_templates=None, body_templates=None, html_templates=None, from_email=None, language=None,
                   **kwargs):
    """
    usage:
    construct_mail(['my@email.com'], {'my_obj': obj}, template_base='myapp/emails/my_obj_notification').send()

    :param recipients: recipient or list of recipients
    :param context: context for template rendering
    :param template_base: the base template. '.subject.txt', '.body.txt' and '.body.html' will be added
    :param subject: optional subject instead of rendering it through a template
    :param message: optional message (will be inserted into the base email template)
    :param site: the site this is on. uses current site by default
    :param subject_templates: override the subject template
    :param body_templates: override the body template
    :param html_templates: override the html body template
    :param from_email: defaults to settings.DEFAULT_FROM_EMAIL
    :param language: the language that should be active for this email. defaults to currently active lang
    :param kwargs: kwargs to pass into the Email class
    :return: an EmailMultiAlternatives instance, ready to ``.send()``
    """
    language = language or translation.get_language()
    # Render everything inside the requested locale so templates localise.
    with force_language(language):
        recipients = recipients or []
        # NOTE(review): `basestring` is a Python 2 builtin -- presumably this
        # module runs under Py2 or defines a shim elsewhere; confirm.
        if isinstance(recipients, basestring):
            recipients = [recipients]
        from_email = from_email or settings.DEFAULT_FROM_EMAIL
        subject_templates = subject_templates or get_template_names(language, template_base, 'subject', 'txt')
        body_templates = body_templates or get_template_names(language, template_base, 'body', 'txt')
        html_templates = html_templates or get_template_names(language, template_base, 'body', 'html')
        if not context:
            context = {}
        site = site or Site.objects.get_current()
        context['site'] = site
        context['site_name'] = site.name
        protocol = 'http'  # TODO: this should come from settings
        base_url = "%s://%s" % (protocol, site.domain)
        if message:
            context['message'] = message
        subject = subject or render_to_string(subject_templates, context)
        # Newlines are not allowed in SMTP subject headers; strip them out.
        subject = subject.replace('\n', '').replace('\r', '').strip()
        context['subject'] = subject
        try:
            html = render_to_string(html_templates, context)
        except TemplateDoesNotExist:
            # HTML part is optional; fall back to text-only mail.
            html = ''
        else:
            # Inline the CSS so the HTML renders consistently in mail clients.
            html = premailer.transform(html, base_url=base_url)
        try:
            body = render_to_string(body_templates, context)
        except TemplateDoesNotExist:
            body = ''
        mail = EmailMultiAlternatives(subject, body, from_email, recipients, **kwargs)
        if not (body or html):
            # this is so a meaningful exception can be raised
            render_to_string([html_templates], context)
            render_to_string([body_templates], context)
        if html:
            mail.attach_alternative(html, 'text/html')
return mail | usage:
construct_mail(['my@email.com'], {'my_obj': obj}, template_base='myapp/emails/my_obj_notification').send()
:param recipients: recipient or list of recipients
:param context: context for template rendering
:param template_base: the base template. '.subject.txt', '.body.txt' and '.body.html' will be added
:param subject: optional subject instead of rendering it through a template
:param message: optional message (will be inserted into the base email template)
:param site: the site this is on. uses current site by default
:param subject_templates: override the subject template
:param body_templates: override the body template
:param html_templates: override the html body template
:param from_email: defaults to settings.DEFAULT_FROM_EMAIL
:param language: the language that should be active for this email. defaults to currently active lang
:param kwargs: kwargs to pass into the Email class
:return: | entailment |
def cleanup_subprocesses():
    """On python exit: find possibly running subprocesses and kill them."""
    # pylint: disable=redefined-outer-name, reimported
    # atexit callbacks sometimes run after module globals have been torn
    # down, so re-import everything this function needs locally.
    import os
    import errno
    from mirakuru.base_env import processes_with_env
    from mirakuru.compat import SIGKILL

    for pid in processes_with_env(ENV_UUID, str(os.getpid())):
        try:
            os.kill(pid, SIGKILL)
        except OSError as err:
            # ESRCH means the process already died on its own.
            if err.errno != errno.ESRCH:
                print("Can not kill the", pid, "leaked process", err)
def start(self):
    """
    Start defined process.

    After process gets started, timeout countdown begins as well.

    :returns: itself
    :rtype: SimpleExecutor

    .. note::

        We want to open ``stdin``, ``stdout`` and ``stderr`` as text
        streams in universal newlines mode, so we have to set
        ``universal_newlines`` to ``True``.
    """
    if self.process is None:
        command = self.command
        if not self._shell:
            # Without a shell, Popen needs the command as an argv list.
            command = self.command_parts

        env = os.environ.copy()
        # Trick with marking subprocesses with an environment variable.
        #
        # There is no easy way to recognize all subprocesses that were
        # spawned during lifetime of a certain subprocess so mirakuru does
        # this hack in order to mark who was the original parent. Even if
        # some subprocess got daemonized or changed original process group
        # mirakuru will be able to find it by this environment variable.
        #
        # There may be a situation when some subprocess will abandon
        # original envs from parents and then it won't be later found.
        env[ENV_UUID] = self._uuid
        popen_kwargs = {
            'shell': self._shell,
            'stdin': subprocess.PIPE,
            'stdout': subprocess.PIPE,
            'universal_newlines': True,
            'env': env,
        }
        if platform.system() != 'Windows':
            # Run the child in its own session/process group so the whole
            # group can be signalled later on kill() (POSIX only).
            popen_kwargs['preexec_fn'] = os.setsid
        self.process = subprocess.Popen(
            command,
            **popen_kwargs
        )

    # Arm the startup timeout countdown.
    self._set_timeout()
return self | Start defined process.
After process gets started, timeout countdown begins as well.
:returns: itself
:rtype: SimpleExecutor
.. note::
We want to open ``stdin``, ``stdout`` and ``stderr`` as text
streams in universal newlines mode, so we have to set
``universal_newlines`` to ``True``. | entailment |
def _clear_process(self):
"""
Close stdin/stdout of subprocess.
It is required because of ResourceWarning in Python 3.
"""
if self.process:
if self.process.stdin:
self.process.stdin.close()
if self.process.stdout:
self.process.stdout.close()
self.process = None
self._endtime = None | Close stdin/stdout of subprocess.
It is required because of ResourceWarning in Python 3. | entailment |
def _kill_all_kids(self, sig):
    """
    Kill all subprocesses (and their subprocesses) this executor started.

    Recognition relies on the environment variable the executor plants in
    its children, so daemons that scrubbed their environment may escape.

    :param int sig: signal used to stop process run by executor.
    :return: process ids (pids) of killed processes
    :rtype list
    """
    pids = processes_with_env(ENV_UUID, self._uuid)
    for pid in pids:
        log.debug("Killing process %d ...", pid)
        try:
            os.kill(pid, sig)
        except OSError as err:
            # The process may have died on its own in the meantime;
            # anything else is unexpected and gets re-raised.
            if err.errno not in IGNORED_ERROR_CODES:
                raise
        log.debug("Killed process %d.", pid)
    return pids
This function tries to kill all leftovers in process tree that current
executor may have left. It uses environment variable to recognise if
process have origin in this Executor so it does not give 100 % and
some daemons fired by subprocess may still be running.
:param int sig: signal used to stop process run by executor.
:return: process ids (pids) of killed processes
:rtype list | entailment |
def kill(self, wait=True, sig=None):
    """
    Kill the process if running.

    :param bool wait: set to `True` to wait for the process to end,
        or False, to simply proceed after sending signal.
    :param int sig: signal used to kill process run by the executor.
        None by default.
    :returns: itself
    :rtype: SimpleExecutor
    """
    signal_to_send = self._sig_kill if sig is None else sig
    if self.running():
        # Signal the whole process group, not just the direct child.
        os.killpg(self.process.pid, signal_to_send)
        if wait:
            self.process.wait()
    self._kill_all_kids(signal_to_send)
    self._clear_process()
    return self
:param bool wait: set to `True` to wait for the process to end,
or False, to simply proceed after sending signal.
:param int sig: signal used to kill process run by the executor.
None by default.
:returns: itself
:rtype: SimpleExecutor | entailment |
def wait_for(self, wait_for):
    """
    Poll the callback until it returns True or the timeout elapses.

    :param callback wait_for: callback to call
    :raises: mirakuru.exceptions.TimeoutExpired
    :returns: itself
    :rtype: SimpleExecutor
    """
    while True:
        if not self.check_timeout():
            break
        if wait_for():
            return self
        time.sleep(self._sleep)
    # Ran out of time: tear the process down and report the failure.
    self.kill()
    raise TimeoutExpired(self, timeout=self._timeout)
Simply returns if wait_for condition has been met,
raises TimeoutExpired otherwise and kills the process.
:param callback wait_for: callback to call
:raises: mirakuru.exceptions.TimeoutExpired
:returns: itself
:rtype: SimpleExecutor | entailment |
def start(self):
    """
    Start executor with additional checks.

    Refuses to start when something already answers the pre-start check,
    then launches the subprocess and blocks until it reports ready.

    :returns: itself
    :rtype: Executor
    """
    if self.pre_start_check():
        # Some other executor (or process) is running with same config:
        raise AlreadyRunning(self)

    super(Executor, self).start()
    self.wait_for(self.check_subprocess)
    return self
Checks if previous executor isn't running then start process
(executor) and wait until it's started.
:returns: itself
:rtype: Executor | entailment |
def check_subprocess(self):
    """
    Make sure the process didn't exit with an error and run the checks.

    :rtype: bool
    :return: the actual check status
    :raise ProcessExitedWithError: when the main process exits with
        an error
    """
    exit_code = self.process.poll()
    if exit_code not in (None, 0):
        # The main process exited with an error. Clean up the children
        # if any.
        self._kill_all_kids(self._sig_kill)
        self._clear_process()
        raise ProcessExitedWithError(self, exit_code)
    return self.after_start_check()
:rtype: bool
:return: the actual check status
:raise ProcessExitedWithError: when the main process exits with
an error | entailment |
def processes_with_env_psutil(env_name, env_value):
    """
    Find PIDs of processes having environment variable matching given one.

    Internally it uses `psutil` library.

    :param str env_name: name of environment variable to be found
    :param str env_value: environment variable value prefix
    :return: process identifiers (PIDs) of processes that have certain
        environment variable equal certain value
    :rtype: set
    """
    matching = set()
    for proc in psutil.process_iter():
        try:
            info = proc.as_dict(attrs=['pid', 'environ'])
        except (psutil.NoSuchProcess, IOError):
            # Process vanished or is inaccessible; nothing we can do.
            continue
        environ = info.get('environ')
        if environ and env_value in environ.get(env_name, ''):
            matching.add(info['pid'])
    return matching
Internally it uses `psutil` library.
:param str env_name: name of environment variable to be found
:param str env_value: environment variable value prefix
:return: process identifiers (PIDs) of processes that have certain
environment variable equal certain value
:rtype: set | entailment |
def processes_with_env_ps(env_name, env_value):
    """
    Find PIDs of processes having environment variable matching given one.

    It uses `$ ps xe -o pid,cmd` command so it works only on systems
    having such command available (Linux, MacOS). If not available function
    will just log error.

    :param str env_name: name of environment variable to be found
    :param str env_value: environment variable value prefix
    :return: process identifiers (PIDs) of processes that have certain
        environment variable equal certain value
    :rtype: set
    """
    pids = set()
    ps_xe = ''
    try:
        cmd = 'ps', 'xe', '-o', 'pid,cmd'
        ps_xe = subprocess.check_output(cmd).splitlines()
    except OSError as err:
        # A non-ENOENT OSError falls through: ps_xe stays '', so the loop
        # below simply yields no pids.
        if err.errno == errno.ENOENT:
            log.error("`$ ps xe -o pid,cmd` command was called but it is not "
                      "available on this operating system. Mirakuru will not "
                      "be able to list the process tree and find if there are "
                      "any leftovers of the Executor.")
            return pids
    except subprocess.CalledProcessError:
        log.error("`$ ps xe -o pid,cmd` command exited with non-zero code.")

    env = '{0}={1}'.format(env_name, env_value)
    for line in ps_xe:
        # NOTE(review): check_output returns bytes on Python 3, so str(line)
        # yields a repr like "b'123 cmd'". The substring match still works on
        # that repr, but PS_XE_PID_MATCH must tolerate the leading "b'" --
        # confirm the regex handles it.
        line = str(line)
        if env in line:
            pids.add(int(PS_XE_PID_MATCH.match(line).group(1)))
return pids | Find PIDs of processes having environment variable matching given one.
It uses `$ ps xe -o pid,cmd` command so it works only on systems
having such command available (Linux, MacOS). If not available function
will just log error.
:param str env_name: name of environment variable to be found
:param str env_value: environment variable value prefix
:return: process identifiers (PIDs) of processes that have certain
environment variable equal certain value
:rtype: set | entailment |
def _ncc_c(x, y):
"""
>>> _ncc_c([1,2,3,4], [1,2,3,4])
array([ 0.13333333, 0.36666667, 0.66666667, 1. , 0.66666667,
0.36666667, 0.13333333])
>>> _ncc_c([1,1,1], [1,1,1])
array([ 0.33333333, 0.66666667, 1. , 0.66666667, 0.33333333])
>>> _ncc_c([1,2,3], [-1,-1,-1])
array([-0.15430335, -0.46291005, -0.9258201 , -0.77151675, -0.46291005])
"""
den = np.array(norm(x) * norm(y))
den[den == 0] = np.Inf
x_len = len(x)
fft_size = 1 << (2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
cc = np.concatenate((cc[-(x_len-1):], cc[:x_len]))
return np.real(cc) / den | >>> _ncc_c([1,2,3,4], [1,2,3,4])
array([ 0.13333333, 0.36666667, 0.66666667, 1. , 0.66666667,
0.36666667, 0.13333333])
>>> _ncc_c([1,1,1], [1,1,1])
array([ 0.33333333, 0.66666667, 1. , 0.66666667, 0.33333333])
>>> _ncc_c([1,2,3], [-1,-1,-1])
array([-0.15430335, -0.46291005, -0.9258201 , -0.77151675, -0.46291005]) | entailment |
def _ncc_c_2dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 1 dimensional
y vector
Returns a 2 dimensional array of normalized fourier transforms
"""
den = np.array(norm(x, axis=1) * norm(y))
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
cc = np.concatenate((cc[:,-(x_len-1):], cc[:,:x_len]), axis=1)
return np.real(cc) / den[:, np.newaxis] | Variant of NCCc that operates with 2 dimensional X arrays and 1 dimensional
y vector
Returns a 2 dimensional array of normalized fourier transforms | entailment |
def _ncc_c_3dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional
y vector
Returns a 3 dimensional array of normalized fourier transforms
"""
den = norm(x, axis=1)[:, None] * norm(y, axis=1)
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size))[:, None])
cc = np.concatenate((cc[:,:,-(x_len-1):], cc[:,:,:x_len]), axis=2)
return np.real(cc) / den.T[:, :, None] | Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional
y vector
Returns a 3 dimensional array of normalized fourier transforms | entailment |
def _sbd(x, y):
    """
    Shape-based distance between two sequences, plus `y` aligned to `x`.

    >>> _sbd([1,1,1], [1,1,1])
    (-2.2204460492503131e-16, array([1, 1, 1]))
    >>> _sbd([0,1,2], [1,2,3])
    (0.043817112532485103, array([1, 2, 3]))
    >>> _sbd([1,2,3], [0,1,2])
    (0.043817112532485103, array([0, 1, 2]))
    """
    correlations = _ncc_c(x, y)
    best = correlations.argmax()
    # Shift y so its best-correlating alignment lines up with x.
    shift = (best + 1) - max(len(x), len(y))
    return 1 - correlations[best], roll_zeropad(y, shift)
def _extract_shape(idx, x, j, cur_center):
"""
>>> _extract_shape(np.array([0,1,2]), np.array([[1,2,3], [4,5,6]]), 1, np.array([0,3,4]))
array([-1., 0., 1.])
>>> _extract_shape(np.array([0,1,2]), np.array([[-1,2,3], [4,-5,6]]), 1, np.array([0,3,4]))
array([-0.96836405, 1.02888681, -0.06052275])
>>> _extract_shape(np.array([1,0,1,0]), np.array([[1,2,3,4], [0,1,2,3], [-1,1,-1,1], [1,2,2,3]]), 0, np.array([0,0,0,0]))
array([-1.2089303 , -0.19618238, 0.19618238, 1.2089303 ])
>>> _extract_shape(np.array([0,0,1,0]), np.array([[1,2,3,4],[0,1,2,3],[-1,1,-1,1],[1,2,2,3]]), 0, np.array([-1.2089303,-0.19618238,0.19618238,1.2089303]))
array([-1.19623139, -0.26273649, 0.26273649, 1.19623139])
"""
_a = []
for i in range(len(idx)):
if idx[i] == j:
if cur_center.sum() == 0:
opt_x = x[i]
else:
_, opt_x = _sbd(cur_center, x[i])
_a.append(opt_x)
a = np.array(_a)
if len(a) == 0:
return np.zeros((1, x.shape[1]))
columns = a.shape[1]
y = zscore(a, axis=1, ddof=1)
s = np.dot(y.transpose(), y)
p = np.empty((columns, columns))
p.fill(1.0/columns)
p = np.eye(columns) - p
m = np.dot(np.dot(p, s), p)
_, vec = eigh(m)
centroid = vec[:, -1]
finddistance1 = math.sqrt(((a[0] - centroid) ** 2).sum())
finddistance2 = math.sqrt(((a[0] + centroid) ** 2).sum())
if finddistance1 >= finddistance2:
centroid *= -1
return zscore(centroid, ddof=1) | >>> _extract_shape(np.array([0,1,2]), np.array([[1,2,3], [4,5,6]]), 1, np.array([0,3,4]))
array([-1., 0., 1.])
>>> _extract_shape(np.array([0,1,2]), np.array([[-1,2,3], [4,-5,6]]), 1, np.array([0,3,4]))
array([-0.96836405, 1.02888681, -0.06052275])
>>> _extract_shape(np.array([1,0,1,0]), np.array([[1,2,3,4], [0,1,2,3], [-1,1,-1,1], [1,2,2,3]]), 0, np.array([0,0,0,0]))
array([-1.2089303 , -0.19618238, 0.19618238, 1.2089303 ])
>>> _extract_shape(np.array([0,0,1,0]), np.array([[1,2,3,4],[0,1,2,3],[-1,1,-1,1],[1,2,2,3]]), 0, np.array([-1.2089303,-0.19618238,0.19618238,1.2089303]))
array([-1.19623139, -0.26273649, 0.26273649, 1.19623139]) | entailment |
def _kshape(x, k):
    """
    Cluster the rows of `x` into `k` shape-based clusters.

    >>> from numpy.random import seed; seed(0)
    >>> _kshape(np.array([[1,2,3,4], [0,1,2,3], [-1,1,-1,1], [1,2,2,3]]), 2)
    (array([0, 0, 1, 0]), array([[-1.2244258 , -0.35015476,  0.52411628,  1.05046429],
           [-0.8660254 ,  0.8660254 , -0.8660254 ,  0.8660254 ]]))
    """
    m = x.shape[0]
    idx = randint(0, k, size=m)
    centroids = np.zeros((k, x.shape[1]))

    # Alternate between re-estimating centroids and re-assigning rows,
    # for at most 100 rounds or until the assignment stabilises.
    for _ in range(100):
        previous = idx
        for j in range(k):
            centroids[j] = _extract_shape(idx, x, j, centroids[j])
        distances = (1 - _ncc_c_3dim(x, centroids).max(axis=2)).T
        idx = distances.argmin(1)
        if np.array_equal(previous, idx):
            break

    return idx, centroids
>>> _kshape(np.array([[1,2,3,4], [0,1,2,3], [-1,1,-1,1], [1,2,2,3]]), 2)
(array([0, 0, 1, 0]), array([[-1.2244258 , -0.35015476, 0.52411628, 1.05046429],
[-0.8660254 , 0.8660254 , -0.8660254 , 0.8660254 ]])) | entailment |
def get_version_by_version_id(version_id):
    """
    Get the internal version ID by the (major, minor) version.

    :param Tuple version_id: Major and minor version number
    :return: Internal version ID, or None if unknown
    :rtype: Integer|None
    """
    matches = (ver.id for ver in registry.version_info
               if ver.version_id == version_id)
    return next(matches, None)
:param Tuple version_id: Major and minor version number
:return: Internal version ID
:rtype: Integer|None | entailment |
def get_version_name(version_id):
    """
    Get the name of a protocol version by the internal version ID.

    :param Integer version_id: Internal protocol version ID
    :return: Name of the version, or 'unknown' when not registered
    :rtype: String
    """
    ver = registry.version_info.get(version_id)
    return ver.name if ver else 'unknown'
:param Integer version_id: Internal protocol version ID
:return: Name of the version
:rtype: String | entailment |
def get_version_id(protocol_version):
    """
    Get a tuple with major and minor version number.

    :param Integer protocol_version: Internal version ID
    :return: Tuple of major and minor protocol version, or None when the
        version is not registered
    :rtype: Tuple|None
    """
    ver = registry.version_info.get(protocol_version)
    if ver:
        return ver.version_id
    # Make the not-found case explicit, consistent with the sibling
    # lookup helpers that return None.
    return None
:param Integer protocol_version: Internal version ID
:return: Tuple of major and minor protocol version
:rtype: Tuple | entailment |
def update(self, b):  # pylint: disable=method-hidden,invalid-name
    """Compress data given in b, returning compressed result either from this function or writing to fp). Note:
    sometimes output might be zero length (if being buffered by lz4).

    Raises Lz4FramedNoDataError if input is of zero length."""
    with self.__lock:
        output = compress_update(self.__ctx, b)
        if self.__write:
            # Sink mode: emit the frame header before the first chunk,
            # then rebind `update` to the fast-path method so later calls
            # skip the header bookkeeping entirely.
            self.__write(self.__header)
            self.__header = None
            self.__write(output)
            self.update = self.__updateNextWrite
        else:
            # Return mode: the frame header is prepended to the first
            # returned chunk; later calls go through the rebound
            # fast-path method instead.
            header = self.__header
            self.__header = None
            self.update = self.__updateNextReturn
return header + output | Compress data given in b, returning compressed result either from this function or writing to fp). Note:
sometimes output might be zero length (if being buffered by lz4).
Raises Lz4FramedNoDataError if input is of zero length. | entailment |
def end(self):
    """Finalise lz4 frame, outputting any remaining as return from this function or by writing to fp)"""
    with self.__lock:
        remainder = compress_end(self.__ctx)
        if self.__write:
            self.__write(remainder)
        else:
            return remainder
def get_value_name(self, pretty=False):
    """
    Get the name of the value.

    :param Boolean pretty: Return the name in a pretty format
    :return: The name ("n/a" when the value is not in the enum list)
    :rtype: String
    """
    name = self.enums.get(self._value, "n/a")
    if pretty:
        # Append the raw value in hex, e.g. "alert (1a)".
        return "%s (%x)" % (name, self._value)
    return name
:param Boolean pretty: Return the name in a pretty format
:return: The name
:rtype: String | entailment |
def set_value(self, value, force=False):
"""
Set the value.
:param String|Integer value: The value to set. Must be in the enum list.
:param Boolean force: Set the value without checking it
:raises ValueError: If value name given but it isn't available
:raises TypeError: If value is not String or Integer
"""
if force:
self._value = value
return
if value is None:
self._value = value
return
if isinstance(value, six.integer_types):
self._value = value
return
if isinstance(value, six.string_types):
for v, n in self.enums.items():
if n == value:
self._value = v
return
raise ValueError("Unable to find value name in enum list")
raise TypeError(
"Value for '%s' must by of type String or Integer not '%s'" % (
self.name,
type(value)
)
) | Set the value.
:param String|Integer value: The value to set. Must be in the enum list.
:param Boolean force: Set the value without checking it
:raises ValueError: If value name given but it isn't available
:raises TypeError: If value is not String or Integer | entailment |
def dissect(self, data):
"""
Dissect the field.
:param bytes data: The data to extract the field value from
:return: The rest of the data not used to dissect the field value
:rtype: bytes
"""
size = struct.calcsize("B")
if len(data) < size:
raise NotEnoughData(
"Not enough data to decode field '%s' value" % self.name
)
curve_type = struct.unpack("B", data[:size])[0]
if curve_type == 0x03:
self._value = ECParametersNamedCurveField("none")
data = self._value.dissect(data)
else:
raise NotImplementedError(
"Decoding of KeyExchange message for curve 0x%.2X not implemented" % curve_type
)
return data | Dissect the field.
:param bytes data: The data to extract the field value from
:return: The rest of the data not used to dissect the field value
:rtype: bytes | entailment |
def compute_wcs(key, challenge):
"""
Compute an WAMP-CRA authentication signature from an authentication
challenge and a (derived) key.
:param key: The key derived (via PBKDF2) from the secret.
:type key: str/bytes
:param challenge: The authentication challenge to sign.
:type challenge: str/bytes
:return: The authentication signature.
:rtype: bytes
"""
key = key.encode('utf8')
challenge = challenge.encode('utf8')
sig = hmac.new(key, challenge, hashlib.sha256).digest()
return binascii.b2a_base64(sig).strip() | Compute an WAMP-CRA authentication signature from an authentication
challenge and a (derived) key.
:param key: The key derived (via PBKDF2) from the secret.
:type key: str/bytes
:param challenge: The authentication challenge to sign.
:type challenge: str/bytes
:return: The authentication signature.
:rtype: bytes | entailment |
def _register_procedure(self, procedure_name, invocation_policy="single"):
""" Register a "procedure" on a Client as callable over the Router.
"""
options = {"invoke": invocation_policy}
message = Register(procedure=procedure_name, options=options)
request_id = message.request_id
try:
self.send_message(message)
except ValueError:
raise WampProtocolError(
"failed to register callee: %s", procedure_name
)
self.request_ids[request_id] = procedure_name | Register a "procedure" on a Client as callable over the Router. | entailment |
def start(self):
""" Start Crossbar.io in a subprocess.
"""
if self.started is True:
raise WampyError("Router already started")
# will attempt to connect or start up the CrossBar
crossbar_config_path = self.config_path
cbdir = self.crossbar_directory
# starts the process from the root of the test namespace
cmd = [
'crossbar', 'start',
'--cbdir', cbdir,
'--config', crossbar_config_path,
]
self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
self._wait_until_ready()
logger.info(
"Crosbar.io is ready for connections on %s (IPV%s)",
self.url, self.ipv
)
self.started = True | Start Crossbar.io in a subprocess. | entailment |
def _get_handshake_headers(self, upgrade):
""" Do an HTTP upgrade handshake with the server.
Websockets upgrade from HTTP rather than TCP largely because it was
assumed that servers which provide websockets will always be talking to
a browser. Maybe a reasonable assumption once upon a time...
The headers here will go a little further and also agree the
WAMP websocket JSON subprotocols.
"""
headers = []
# https://tools.ietf.org/html/rfc6455
headers.append("GET {} HTTP/1.1".format(self.websocket_location))
headers.append("Host: {}:{}".format(self.host, self.port))
headers.append("Upgrade: websocket")
headers.append("Connection: Upgrade")
# Sec-WebSocket-Key header containing base64-encoded random bytes,
# and the server replies with a hash of the key in the
# Sec-WebSocket-Accept header. This is intended to prevent a caching
# proxy from re-sending a previous WebSocket conversation and does not
# provide any authentication, privacy or integrity
headers.append("Sec-WebSocket-Key: {}".format(self.key))
headers.append("Origin: ws://{}:{}".format(self.host, self.port))
headers.append("Sec-WebSocket-Version: {}".format(WEBSOCKET_VERSION))
if upgrade:
headers.append("Sec-WebSocket-Protocol: {}".format(
WEBSOCKET_SUBPROTOCOLS)
)
logger.debug("connection headers: %s", headers)
return headers | Do an HTTP upgrade handshake with the server.
Websockets upgrade from HTTP rather than TCP largely because it was
assumed that servers which provide websockets will always be talking to
a browser. Maybe a reasonable assumption once upon a time...
The headers here will go a little further and also agree the
WAMP websocket JSON subprotocols. | entailment |
def parse_url(self):
""" Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket
"""
self.scheme = None
self.resource = None
self.host = None
self.port = None
if self.url is None:
return
scheme, url = self.url.split(":", 1)
parsed = urlsplit(url, scheme="http")
if parsed.hostname:
self.host = parsed.hostname
elif '+unix' in scheme:
self.host = 'localhost'
else:
raise ValueError("Invalid hostname from: %s", self.url)
if parsed.port:
self.port = parsed.port
if scheme == "ws":
if not self.port:
self.port = 8080
elif scheme == "wss":
if not self.port:
self.port = 443
elif scheme in ('ws+unix', 'wss+unix'):
pass
else:
raise ValueError("Invalid scheme: %s" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if '+unix' in scheme:
self.unix_socket_path = resource
resource = '/'
if parsed.query:
resource += "?" + parsed.query
self.scheme = scheme
self.resource = resource | Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket | entailment |
def generate_mask(cls, mask_key, data):
""" Mask data.
:Parameters:
mask_key: byte string
4 byte string(byte), e.g. '\x10\xc6\xc4\x16'
data: str
data to mask
"""
# Masking of WebSocket traffic from client to server is required
# because of the unlikely chance that malicious code could cause
# some broken proxies to do the wrong thing and use this as an
# attack of some kind. Nobody has proved that this could actually
# happen, but since the fact that it could happen was reason enough
# for browser vendors to get twitchy, masking was added to remove
# the possibility of it being used as an attack.
if data is None:
data = ""
data = bytearray(data, 'utf-8')
_m = array.array("B", mask_key)
_d = array.array("B", data)
for i in range(len(_d)):
_d[i] ^= _m[i % 4]
return _d.tostring() | Mask data.
:Parameters:
mask_key: byte string
4 byte string(byte), e.g. '\x10\xc6\xc4\x16'
data: str
data to mask | entailment |
def generate_bytes(cls, payload, fin_bit, opcode, mask_payload):
""" Format data to string (buffered_bytes) to send to server.
"""
# the first byte contains the FIN bit, the 3 RSV bits and the
# 4 opcode bits and for a client will *always* be 1000 0001 (or 129).
# so we want the first byte to look like...
#
# 1 0 0 0 0 0 0 1 (1 is a text frame)
# +-+-+-+-+-------+
# |F|R|R|R| opcode|
# |I|S|S|S| |
# |N|V|V|V| |
# | |1|2|3| |
# +-+-+-+-+-------+
# note that because all RSV bits are zero, we can ignore them
# this shifts each bit into position and bitwise ORs them together,
# using the struct module to pack them as incoming network bytes
frame = pack(
'!B', (
(fin_bit << 7) | opcode
)
)
# the second byte - and maybe the 7 after this, we'll use to tell
# the server how long our payload is.
# +-+-------------+-------------------------------+
# |M| Payload len | Extended payload length |
# |A| (7) | (16/63) |
# |S| | (if payload len==126/127) |
# |K| | |
# +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
# | Extended payload length continued, if payload len == 127 |
# + - - - - - - - - - - - - - - - +-------------------------------+
# the mask is always included with client -> server, so the first bit
# of the second byte is always 1 which flags that the data is masked,
# i.e. encoded
if mask_payload:
mask_bit = 1 << 7
else:
mask_bit = 0 << 7
# next we have to | this bit with the payload length.
# note that we ensure that the payload is utf-8 encoded before we take
# the length because unicode characters can be >1 bytes in length and
# lead to bugs if we just do ``len(payload)``.
length = len(payload.encode('utf-8'))
if length >= Frame.MAX_LENGTH:
raise WebsocktProtocolError("data is too long")
# the second byte contains the payload length and mask
if length < Frame.LENGTH_7:
# we can simply represent payload length with first 7 bits
frame += pack('!B', (mask_bit | length))
elif length < Frame.LENGTH_16:
frame += pack('!B', (mask_bit | 126)) + pack('!H', length)
else:
frame += pack('!B', (mask_bit | 127)) + pack('!Q', length)
if mask_payload:
# we always mask frames from the client to server
# use a string of n random buffered_bytes for the mask
mask_key = os.urandom(4)
mask_data = cls.generate_mask(mask_key=mask_key, data=payload)
mask = mask_key + mask_data
frame += mask
else:
frame += bytearray(payload, 'utf-8')
return bytearray(frame) | Format data to string (buffered_bytes) to send to server. | entailment |
def add_edge(self, edge):
"""
Add edge (u, v) to the graph. Raises InvariantError if adding the edge
would form a cycle.
"""
u, v = edge
both_exist = u in self.vertices and v in self.vertices
# Using `is` because if they belong to the same component, they MUST
# share the same set object!
if both_exist and self.components[u] is self.components[v]:
# Both vertices are part of the same connected component.
raise InvariantError('Adding %r would form a cycle' % (edge,))
if u == v:
raise InvariantError('Cannot add loop: %r' % (edge,))
# Ensure the vertices exist in the graph.
self.add_vertex(u)
self.add_vertex(v)
# Add the edges to each other.
self._vertices[u].add(v)
self._vertices[v].add(u)
# Add all of the smaller components to the bigger one.
smaller_component, bigger_component = self.sort_components(u, v)
for vertex in smaller_component:
bigger_component.add(vertex)
# And with this assignment, say bye-bye to the smaller component.
self.components[vertex] = bigger_component | Add edge (u, v) to the graph. Raises InvariantError if adding the edge
would form a cycle. | entailment |
def edges(self):
"""
Edges of this graph, in canonical order.
"""
canonical_edges = set()
for v1, neighbours in self._vertices.items():
for v2 in neighbours:
edge = self.canonical_order((v1, v2))
canonical_edges.add(edge)
return canonical_edges | Edges of this graph, in canonical order. | entailment |
def ordered_deduplicate(sequence):
"""
Returns the sequence as a tuple with the duplicates removed,
preserving input order. Any duplicates following the first
occurrence are removed.
>>> ordered_deduplicate([1, 2, 3, 1, 32, 1, 2])
(1, 2, 3, 32)
Based on recipe from this StackOverflow post:
http://stackoverflow.com/a/480227
"""
seen = set()
# Micro optimization: each call to seen_add saves an extra attribute
# lookup in most iterations of the loop.
seen_add = seen.add
return tuple(x for x in sequence if not (x in seen or seen_add(x))) | Returns the sequence as a tuple with the duplicates removed,
preserving input order. Any duplicates following the first
occurrence are removed.
>>> ordered_deduplicate([1, 2, 3, 1, 32, 1, 2])
(1, 2, 3, 32)
Based on recipe from this StackOverflow post:
http://stackoverflow.com/a/480227 | entailment |
def hash_parameters(words, minimize_indices=False):
"""
Gives hash parameters for the given set of words.
>>> info = hash_parameters('sun mon tue wed thu fri sat'.split())
>>> len(info.t1)
21
>>> len(info.t2)
21
>>> len(info.g) # g values are 1-indexed...
22
"""
# Ensure that we have an indexable sequence.
words = tuple(words)
# Delegate to the hash builder.
return CzechHashBuilder(words).hash_info | Gives hash parameters for the given set of words.
>>> info = hash_parameters('sun mon tue wed thu fri sat'.split())
>>> len(info.t1)
21
>>> len(info.t2)
21
>>> len(info.g) # g values are 1-indexed...
22 | entailment |
def make_pickable_hash(words, *args, **kwargs):
"""
Creates an ordered, minimal perfect hash function for the given sequence
of words.
>>> hf = make_pickable_hash(['sun', 'mon', 'tue', 'wed', 'thu',
... 'fri', 'sat'])
>>> hf('fri')
5
>>> hf('sun')
0
"""
return PickableHash(CzechHashBuilder(words, *args, **kwargs)).czech_hash | Creates an ordered, minimal perfect hash function for the given sequence
of words.
>>> hf = make_pickable_hash(['sun', 'mon', 'tue', 'wed', 'thu',
... 'fri', 'sat'])
>>> hf('fri')
5
>>> hf('sun')
0 | entailment |
def make_dict(name, words, *args, **kwargs):
"""
make_dict(name, words, *args, **kwargs) -> mapping subclass
Takes a sequence of words (or a pre-built Czech HashInfo) and returns a
mapping subclass called `name` (used a dict) that employs the use of the
minimal perfect hash.
This mapping subclass has guaranteed O(1) worst-case lookups, additions,
and deletions, however is slower than dict() in practice.
>>> months = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
>>> MyDict = make_dict('MyDict', months)
>>> d = MyDict(dec=21, feb=None, may='hello')
>>> d['jul'] = False
>>> d
MyDict([('feb', None), ('may', 'hello'), ('jul', False), ('dec', 21)])
>>> del d['may']
>>> del d['apr']
Traceback (most recent call last):
...
KeyError: 'apr'
>>> len(d)
3
"""
info = CzechHashBuilder(words, *args, **kwargs)
# Create a docstring that at least describes where the class came from...
doc = """
Dictionary-like object that uses minimal perfect hashing, perserving
original order. This class was generated by `%s.%s(%r, ...)`.
""" % (__name__, make_dict.__name__, name)
# Delegate to create_dict.
return create_dict_subclass(name, info.hash_function, info.words, doc) | make_dict(name, words, *args, **kwargs) -> mapping subclass
Takes a sequence of words (or a pre-built Czech HashInfo) and returns a
mapping subclass called `name` (used a dict) that employs the use of the
minimal perfect hash.
This mapping subclass has guaranteed O(1) worst-case lookups, additions,
and deletions, however is slower than dict() in practice.
>>> months = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
>>> MyDict = make_dict('MyDict', months)
>>> d = MyDict(dec=21, feb=None, may='hello')
>>> d['jul'] = False
>>> d
MyDict([('feb', None), ('may', 'hello'), ('jul', False), ('dec', 21)])
>>> del d['may']
>>> del d['apr']
Traceback (most recent call last):
...
KeyError: 'apr'
>>> len(d)
3 | entailment |
def hash_function(self):
"""
Returns the hash function proper. Ensures that `self` is not bound to
the returned closure.
"""
assert hasattr(self, 'f1') and hasattr(self, 'f2')
# These are not just convenient aliases for the given
# attributes; if `self` would creep into the returned closure,
# that would ensure that a reference to this big, fat object
# would be kept alive; hence, any hash function would carry
# around all of the auxiliary state that was created during the
# generation of the hash parameters. Omitting `self` ensures
# this object has a chance to be garbage collected.
f1, f2, g = self.f1, self.f2, self.g
def czech_hash(word):
v1 = f1(word)
v2 = f2(word)
return g[v1] + g[v2]
return czech_hash | Returns the hash function proper. Ensures that `self` is not bound to
the returned closure. | entailment |
def generate_acyclic_graph(self):
"""
Generates an acyclic graph for the given words.
Adds the graph, and a list of edge-word associations to the object.
"""
# Maximum length of each table, respectively.
# Hardcoded n = cm, where c = 3
# There might be a good way to choose an appropriate C,
# but [1] suggests the average amount of iterations needed
# to generate an acyclic graph is sqrt(3).
self.n = 3 * len(self.words)
max_tries = len(self.words) ** 2
for trial in range(max_tries):
try:
self.generate_or_fail()
except forest.InvariantError:
continue
else:
# Generated successfully!
self.trials_taken = trial + 1
return
raise RuntimeError("Could not generate graph in "
"{} tries".format(max_tries)) | Generates an acyclic graph for the given words.
Adds the graph, and a list of edge-word associations to the object. | entailment |
def generate_random_table(self):
"""
Generates random tables for given word lists.
"""
table = list(range(0, self.n))
random.shuffle(table)
return table | Generates random tables for given word lists. | entailment |
def generate_or_fail(self):
"""
Attempts to generate a random acyclic graph, raising an
InvariantError if unable to.
"""
t1 = self.generate_random_table()
t2 = self.generate_random_table()
f1 = self.generate_func(t1)
f2 = self.generate_func(t2)
edges = [(f1(word), f2(word)) for word in self.words]
# Try to generate that graph, mack!
# Note that failure to generate the graph here should be caught
# by the caller.
graph = forest.ForestGraph(edges=edges)
# Associate each edge with its corresponding word.
associations = {}
for num in range(len(self.words)):
edge = edges[num]
word = self.words[num]
associations[graph.canonical_order(edge)] = (num, word)
# Assign all of these to the object.
for name in ('t1', 't2', 'f1', 'f2', 'graph', 'associations'):
self.__dict__[name] = locals()[name] | Attempts to generate a random acyclic graph, raising an
InvariantError if unable to. | entailment |
def generate_func(self, table):
"""
Generates a random table based mini-hashing function.
"""
# Ensure that `self` isn't suddenly in the closure...
n = self.n
def func(word):
return sum(x * ord(c) for x, c in zip(table, word)) % n
return func | Generates a random table based mini-hashing function. | entailment |
def create_dict_subclass(name, hash_func, slots, doc):
"""
Creates a dict subclass named name, using the hash_function to index
hash_length items. Doc should be any additional documentation added to the
class.
"""
hash_length = len(slots)
# Returns array index -- raises a KeyError if the key does not match
# its slot value.
def index_or_key_error(key):
index = hash_func(key)
# Make sure the key is **exactly** the same.
if key != slots[index]:
raise KeyError(key)
return index
def init(self, *args, **kwargs):
self._arr = [None] * hash_length
self._len = 0
# Delegate initialization to update provided by MutableMapping:
self.update(*args, **kwargs)
def getitem(self, key):
index = index_or_key_error(key)
if self._arr[index] is None:
raise KeyError(key)
return self._arr[index][1]
def setitem(self, key, value):
index = index_or_key_error(key)
self._arr[index] = (key, value)
def delitem(self, key):
index = index_or_key_error(key)
if self._arr[index] is None:
raise KeyError(key)
self._arr[index] = None
def dict_iter(self):
return (pair[0] for pair in self._arr if pair is not None)
def dict_len(self):
# TODO: Make this O(1) using auxiliary state?
return sum(1 for _ in self)
def dict_repr(self):
arr_repr = (repr(pair) for pair in self._arr if pair is not None)
return ''.join((name, '([', ', '.join(arr_repr), '])'))
# Inheriting from MutableMapping gives us a whole whackload of methods for
# free.
bases = (collections.MutableMapping,)
return type(name, bases, {
'__init__': init,
'__doc__': doc,
'__getitem__': getitem,
'__setitem__': setitem,
'__delitem__': delitem,
'__iter__': dict_iter,
'__len__': dict_len,
'__repr__': dict_repr,
}) | Creates a dict subclass named name, using the hash_function to index
hash_length items. Doc should be any additional documentation added to the
class. | entailment |
def validate(data, skiperrors=False, fixerrors=True):
"""Checks that the geojson data is a feature collection, that it
contains a proper "features" attribute, and that all features are valid too.
Returns True if all goes well.
- skiperrors will throw away any features that fail to validate.
- fixerrors will attempt to auto fix any minor errors without raising exceptions.
"""
if not "type" in data:
if fixerrors:
data["type"] = "FeatureCollection"
else:
raise ValueError("The geojson data needs to have a type key")
if not data["type"] == "FeatureCollection":
if fixerrors:
data["type"] = "FeatureCollection"
else:
raise ValueError("The geojson data needs to be a feature collection")
if "features" in data:
if not isinstance(data["features"], list):
raise ValueError("The features property needs to be a list")
else: raise ValueError("The FeatureCollection needs to contain a 'features' property")
if skiperrors:
for featuredict in data["features"]:
feat = Feature(featuredict)
try: feat.validate(fixerrors)
except: data["features"].remove(featuredict)
else:
for featuredict in data["features"]:
feat = Feature(featuredict)
feat.validate(fixerrors)
return True | Checks that the geojson data is a feature collection, that it
contains a proper "features" attribute, and that all features are valid too.
Returns True if all goes well.
- skiperrors will throw away any features that fail to validate.
- fixerrors will attempt to auto fix any minor errors without raising exceptions. | entailment |
def validate(self, fixerrors=True):
"""
Validates that the geometry is correctly formatted according to the geometry type.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the geometry is valid.
Raises:
- An Exception if not valid.
"""
# validate nullgeometry or has type and coordinates keys
if not self._data:
# null geometry, no further checking needed
return True
elif "type" not in self._data or "coordinates" not in self._data:
raise Exception("A geometry dictionary or instance must have the type and coordinates entries")
# first validate geometry type
if not self.type in ("Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"):
if fixerrors:
coretype = self.type.lower().replace("multi","")
if coretype == "point":
newtype = "Point"
elif coretype == "linestring":
newtype = "LineString"
elif coretype == "polygon":
newtype = "Polygon"
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
if self.type.lower().startswith("multi"):
newtype = "Multi" + newtype
self.type = newtype
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
# then validate coordinate data type
coords = self._data["coordinates"]
if not isinstance(coords, (list,tuple)): raise Exception("Coordinates must be a list or tuple type")
# then validate coordinate structures
if self.type == "Point":
if not len(coords) == 2: raise Exception("Point must be one coordinate pair")
elif self.type in ("MultiPoint","LineString"):
if not len(coords) > 1: raise Exception("MultiPoint and LineString must have more than one coordinates")
elif self.type == "MultiLineString":
for line in coords:
if not len(line) > 1: raise Exception("All LineStrings in a MultiLineString must have more than one coordinate")
elif self.type == "Polygon":
for exterior_or_holes in coords:
if not len(exterior_or_holes) >= 3: raise Exception("The exterior and all holes in a Polygon must have at least 3 coordinates")
elif self.type == "MultiPolygon":
for eachmulti in coords:
for exterior_or_holes in eachmulti:
if not len(exterior_or_holes) >= 3: raise Exception("The exterior and all holes in all Polygons of a MultiPolygon must have at least 3 coordinates")
# validation successful
return True | Validates that the geometry is correctly formatted according to the geometry type.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the geometry is valid.
Raises:
- An Exception if not valid. | entailment |
def validate(self, fixerrors=True):
"""
Validates that the feature is correctly formatted.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the feature is valid.
Raises:
- An Exception if not valid.
"""
if not "type" in self._data or self._data["type"] != "Feature":
if fixerrors:
self._data["type"] = "Feature"
else:
raise Exception("A geojson feature dictionary must contain a type key and it must be named 'Feature'.")
if not "geometry" in self._data:
if fixerrors:
self.geometry = Geometry() # nullgeometry
else:
raise Exception("A geojson feature dictionary must contain a geometry key.")
if not "properties" in self._data or not isinstance(self.properties,dict):
if fixerrors:
self._data["properties"] = dict()
else:
raise Exception("A geojson feature dictionary must contain a properties key and it must be a dictionary type.")
self.geometry.validate(fixerrors)
return True | Validates that the feature is correctly formatted.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the feature is valid.
Raises:
- An Exception if not valid. | entailment |
def all_attributes(self):
"""
Collect and return a list of all attributes/properties/fields used in any of the features.
"""
features = self._data["features"]
if not features: return []
elif len(features) == 1: return features[0]["properties"].keys()
else:
fields = set(features[0]["properties"].keys())
for feature in features[1:]:
fields.update(feature["properties"].keys())
return list(fields) | Collect and return a list of all attributes/properties/fields used in any of the features. | entailment |
def common_attributes(self):
"""
Collect and return a list of attributes/properties/fields common to all features.
"""
features = self._data["features"]
if not features: return []
elif len(features) == 1: return features[0]["properties"].keys()
else:
fields = set(features[0]["properties"].keys())
for feature in features[1:]:
fields.intersection_update(feature["properties"].keys())
return list(fields) | Collect and return a list of attributes/properties/fields common to all features. | entailment |
def add_feature(self, obj=None, geometry=None, properties=None):
"""
Adds a given feature. If obj isn't specified, geometry and properties can be set as arguments directly.
Parameters:
- **obj**: Another feature instance, an object with the \_\_geo_interface__ or a geojson dictionary of the Feature type.
- **geometry** (optional): Anything that the Geometry instance can accept.
- **properties** (optional): A dictionary of key-value property pairs.
"""
properties = properties or {}
if isinstance(obj, Feature):
# instead of creating copy, the original feat should reference the same one that was added here
feat = obj._data
elif isinstance(obj, dict):
feat = obj.copy()
else:
feat = Feature(geometry=geometry, properties=properties).__geo_interface__
self._data["features"].append(feat) | Adds a given feature. If obj isn't specified, geometry and properties can be set as arguments directly.
Parameters:
- **obj**: Another feature instance, an object with the \_\_geo_interface__ or a geojson dictionary of the Feature type.
- **geometry** (optional): Anything that the Geometry instance can accept.
- **properties** (optional): A dictionary of key-value property pairs. | entailment |
def define_crs(self, type, name=None, link=None, link_type=None):
"""
Defines the coordinate reference system for the geojson file.
For link crs, only online urls are currenlty supported
(no auxilliary crs files).
Parameters:
- **type**: The type of crs, either "name" or "link".
- **name** (optional): The crs name as an OGC formatted crs string (eg "urn:ogc:def:crs:..."), required if type is "name"
- **link**: The crs online url link address, required if type is "link".
- **link_type**: The type of crs link, optional if type is "link".
"""
if not type in ("name","link"): raise Exception("type must be either 'name' or 'link'")
crs = self._data["crs"] = {"type":type, "properties":{} }
if type == "name":
if not name: raise Exception("name argument must be given")
crs["properties"]["name"] = name
elif type == "link":
if not link: raise Exception("link argument must be given")
crs["properties"]["href"] = link
if link_type:
crs["properties"]["type"] = link_type | Defines the coordinate reference system for the geojson file.
For link crs, only online urls are currenlty supported
(no auxilliary crs files).
Parameters:
- **type**: The type of crs, either "name" or "link".
- **name** (optional): The crs name as an OGC formatted crs string (eg "urn:ogc:def:crs:..."), required if type is "name"
- **link**: The crs online url link address, required if type is "link".
- **link_type**: The type of crs link, optional if type is "link". | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.