text stringlengths 4 1.02M | meta dict |
|---|---|
"""Files API.
.. deprecated:: 1.8.1
Use Google Cloud Storage Client library instead.
Blobstore-specific Files API calls."""
from __future__ import with_statement
__all__ = ['create', 'get_blob_key', 'get_file_name']
import hashlib
import urllib
from google.appengine.api import datastore
from google.appengine.api import namespace_manager
from google.appengine.api.files import file as files
from google.appengine.ext import blobstore
_BLOBSTORE_FILESYSTEM = files.BLOBSTORE_FILESYSTEM
_BLOBSTORE_DIRECTORY = '/' + _BLOBSTORE_FILESYSTEM + '/'
_BLOBSTORE_NEW_FILE_NAME = 'new'
_MIME_TYPE_PARAMETER = 'content_type'
_BLOBINFO_UPLOADED_FILENAME_PARAMETER = 'file_name'
_DATASTORE_MAX_PROPERTY_SIZE = 500
def create(mime_type='application/octet-stream',
           _blobinfo_uploaded_filename=None):
  """Create a writable blobstore file.

  Args:
    mime_type: MIME type of the resulting blob content, as a string.
    _blobinfo_uploaded_filename: Optional BlobInfo file name for the
      resulting blob, as a string.

  Returns:
    A blobstore file name. The file can be opened for write by the File
    API open function. To read the file or obtain its blob key, finalize
    it and call the get_blob_key function.

  Raises:
    files.InvalidArgumentError: if an argument is empty or not a string.
  """
  if not mime_type:
    raise files.InvalidArgumentError('Empty mime_type')
  if not isinstance(mime_type, basestring):
    raise files.InvalidArgumentError('Expected string for mime_type')
  if _blobinfo_uploaded_filename and not isinstance(
      _blobinfo_uploaded_filename, basestring):
    raise files.InvalidArgumentError(
        'Expected string for _blobinfo_uploaded_filename')
  # Build the creation parameters only after all validation has passed.
  params = {_MIME_TYPE_PARAMETER: mime_type}
  if _blobinfo_uploaded_filename:
    params[_BLOBINFO_UPLOADED_FILENAME_PARAMETER] = _blobinfo_uploaded_filename
  return files._create(_BLOBSTORE_FILESYSTEM, params=params)
_BLOB_FILE_INDEX_KIND = '__BlobFileIndex__'
_BLOB_KEY_PROPERTY_NAME = 'blob_key'
def _get_blob_file_index_key_name(creation_handle):
  """Return the key name of the __BlobFileIndex__ entity for a handle.

  Handles shorter than _DATASTORE_MAX_PROPERTY_SIZE are used verbatim;
  longer ones would not fit in a datastore property, so their sha512 hex
  digest is used instead.
  """
  if len(creation_handle) >= _DATASTORE_MAX_PROPERTY_SIZE:
    return hashlib.sha512(creation_handle).hexdigest()
  return creation_handle
def get_blob_key(create_file_name):
  """Get a blob key for finalized blobstore file.

  Args:
    create_file_name: Writable blobstore filename as obtained from create()
      function. The file should be finalized.

  Returns:
    An instance of apphosting.ext.blobstore.BlobKey for corresponding blob
    or None if the blob referred to by the file name is not finalized.

  Raises:
    google.appengine.api.files.InvalidFileNameError if the file name is not
    a valid nonfinalized blob file name.
  """
  if not create_file_name:
    raise files.InvalidArgumentError('Empty file name')
  if not isinstance(create_file_name, basestring):
    raise files.InvalidArgumentError('Expected string for file name')
  if not create_file_name.startswith(_BLOBSTORE_DIRECTORY):
    raise files.InvalidFileNameError(
        'Filename %s passed to get_blob_key doesn\'t have prefix %s' %
        (create_file_name, _BLOBSTORE_DIRECTORY))
  ticket = create_file_name[len(_BLOBSTORE_DIRECTORY):]
  # A ticket without the creation-handle prefix is already a blob key string.
  if not ticket.startswith(files._CREATION_HANDLE_PREFIX):
    return blobstore.BlobKey(ticket)
  # Otherwise look up the __BlobFileIndex__ entity that maps the creation
  # handle to the blob key. datastore.Get on a list returns a list; the
  # entry is None when no such entity exists.
  blob_file_index = datastore.Get([datastore.Key.from_path(
      _BLOB_FILE_INDEX_KIND,
      _get_blob_file_index_key_name(ticket),
      namespace='')])[0]
  if blob_file_index:
    blob_key_str = blob_file_index[_BLOB_KEY_PROPERTY_NAME]
    # Verify a BlobInfo entity exists for this key; a missing BlobInfo
    # means the blob is not finalized yet.
    results = datastore.Get([datastore.Key.from_path(
        blobstore.BLOB_INFO_KIND, blob_key_str, namespace='')])
    if results[0] is None:
      return None
  elif len(ticket) >= _DATASTORE_MAX_PROPERTY_SIZE:
    # Handles this long are only ever stored via __BlobFileIndex__ (see
    # _get_blob_file_index_key_name), so no index entry means no blob.
    return None
  else:
    # No index entity: query BlobInfo by creation handle directly —
    # presumably a fallback for blobs written without an index entity
    # (TODO confirm against the finalization code path).
    query = datastore.Query(blobstore.BLOB_INFO_KIND,
                            {'creation_handle =': ticket},
                            keys_only=True,
                            namespace='')
    results = query.Get(1)
    if not results:
      return None
    blob_key_str = results[0].name()
  return blobstore.BlobKey(blob_key_str)
def get_file_name(blob_key):
  """Get a filename that reads the blob referenced by blob_key.

  Args:
    blob_key: An instance of BlobKey (or its string representation).

  Returns:
    File name as string which can be used with File API to read the file.

  Raises:
    files.InvalidArgumentError: if blob_key is empty or of the wrong type.
  """
  if not blob_key:
    raise files.InvalidArgumentError('Empty blob key')
  if not isinstance(blob_key, (blobstore.BlobKey, basestring)):
    raise files.InvalidArgumentError('Expected string or blobstore.BlobKey')
  return _BLOBSTORE_DIRECTORY + str(blob_key)
| {
"content_hash": "42ef256b7c20209a7ce0dccd1f7f2f46",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 80,
"avg_line_length": 29.621794871794872,
"alnum_prop": 0.7048257952824064,
"repo_name": "dcroc16/skunk_works",
"id": "65d5ea1e5af857629540df9e30db11b57fef387c",
"size": "5226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google_appengine/google/appengine/api/files/blobstore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "407860"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "CSS",
"bytes": "251658"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "JavaScript",
"bytes": "784750"
},
{
"name": "PHP",
"bytes": "2381119"
},
{
"name": "Python",
"bytes": "51887444"
},
{
"name": "Shell",
"bytes": "32889"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
'''Basic tests about the client'''
from common import TestQless
class TestClient(TestQless):
    '''Test the client'''

    def test_track(self):
        '''Gives us access to track and untrack jobs'''
        self.client.queues['foo'].put('Foo', {}, jid='jid')
        self.client.track('jid')
        self.assertEqual(self.client.jobs.tracked()['jobs'][0].jid, 'jid')
        self.client.untrack('jid')
        self.assertEqual(self.client.jobs.tracked(),
                         {'jobs': [], 'expired': {}})

    def test_attribute_error(self):
        '''Throws AttributeError for non-attributes'''
        self.assertRaises(AttributeError, lambda: self.client.foo)

    def test_tags(self):
        '''Provides access to top tags'''
        self.assertEqual(self.client.tags(), {})
        for _ in range(10):
            self.client.queues['foo'].put('Foo', {}, tags=['foo'])
        self.assertEqual(self.client.tags(), ['foo'])

    def test_unfail(self):
        '''Provides access to unfail'''
        # Materialize the jids as a list: under Python 3, map() returns a
        # one-shot iterator that would be exhausted after the first loop,
        # silently skipping the later assertion loops.
        jids = [str(i) for i in range(10)]
        for jid in jids:
            self.client.queues['foo'].put('Foo', {}, jid=jid)
            self.client.queues['foo'].pop().fail('foo', 'bar')
        for jid in jids:
            self.assertEqual(self.client.jobs[jid].state, 'failed')
        self.client.unfail('foo', 'foo')
        for jid in jids:
            self.assertEqual(self.client.jobs[jid].state, 'waiting')
class TestJobs(TestQless):
    '''Test the Jobs class'''

    def test_basic(self):
        '''Can give us access to jobs'''
        self.assertEqual(None, self.client.jobs['jid'])
        self.client.queues['foo'].put('Foo', {}, jid='jid')
        self.assertNotEqual(None, self.client.jobs['jid'])

    def test_recurring(self):
        '''Can give us access to recurring jobs'''
        self.assertEqual(None, self.client.jobs['jid'])
        self.client.queues['foo'].recur('Foo', {}, 60, jid='jid')
        self.assertNotEqual(None, self.client.jobs['jid'])

    def test_complete(self):
        '''Can give us access to complete jobs'''
        queue = self.client.queues['foo']
        self.assertEqual([], self.client.jobs.complete())
        queue.put('Foo', {}, jid='jid')
        queue.pop().complete()
        self.assertEqual(['jid'], self.client.jobs.complete())

    def test_tracked(self):
        '''Gives us access to tracked jobs'''
        self.assertEqual({'jobs': [], 'expired': {}},
                         self.client.jobs.tracked())
        self.client.queues['foo'].put('Foo', {}, jid='jid')
        self.client.track('jid')
        self.assertEqual('jid', self.client.jobs.tracked()['jobs'][0].jid)

    def test_tagged(self):
        '''Gives us access to tagged jobs'''
        self.assertEqual({'total': 0, 'jobs': {}},
                         self.client.jobs.tagged('foo'))
        self.client.queues['foo'].put('Foo', {}, jid='jid', tags=['foo'])
        self.assertEqual('jid', self.client.jobs.tagged('foo')['jobs'][0])

    def test_failed(self):
        '''Gives us access to failed jobs'''
        queue = self.client.queues['foo']
        self.assertEqual({'total': 0, 'jobs': []},
                         self.client.jobs.failed('foo'))
        queue.put('Foo', {}, jid='jid')
        queue.pop().fail('foo', 'bar')
        self.assertEqual('jid', self.client.jobs.failed('foo')['jobs'][0].jid)

    def test_failures(self):
        '''Gives us access to failure types'''
        queue = self.client.queues['foo']
        self.assertEqual({}, self.client.jobs.failed())
        queue.put('Foo', {}, jid='jid')
        queue.pop().fail('foo', 'bar')
        self.assertEqual({'foo': 1}, self.client.jobs.failed())
class TestQueues(TestQless):
    '''Test the Queues class'''

    def test_basic(self):
        '''Gives us access to queues'''
        self.assertNotEqual(None, self.client.queues['foo'])

    def test_counts(self):
        '''Gives us access to counts'''
        self.assertEqual({}, self.client.queues.counts)
        self.client.queues['foo'].put('Foo', {})
        expected = [{
            'scheduled': 0,
            'name': 'foo',
            'paused': False,
            'waiting': 1,
            'depends': 0,
            'running': 0,
            'stalled': 0,
            'recurring': 0
        }]
        self.assertEqual(expected, self.client.queues.counts)

    def test_attribute_error(self):
        '''Raises AttributeErrors for non-attributes'''
        self.assertRaises(AttributeError, lambda: self.client.queues.foo)
class TestWorkers(TestQless):
    '''Test the Workers class'''

    def test_individual(self):
        '''Gives us access to individual workers'''
        self.client.queues['foo'].put('Foo', {}, jid='jid')
        self.assertEqual({'jobs': [], 'stalled': []},
                         self.client.workers['worker'])
        self.worker.queues['foo'].pop()
        self.assertEqual({'jobs': ['jid'], 'stalled': []},
                         self.client.workers['worker'])

    def test_counts(self):
        '''Gives us access to worker counts'''
        self.client.queues['foo'].put('Foo', {}, jid='jid')
        self.assertEqual({}, self.client.workers.counts)
        self.worker.queues['foo'].pop()
        self.assertEqual([{'jobs': 1, 'name': 'worker', 'stalled': 0}],
                         self.client.workers.counts)

    def test_attribute_error(self):
        '''Raises AttributeErrors for non-attributes'''
        self.assertRaises(AttributeError, lambda: self.client.workers.foo)
# This is used for TestRetry
class Foo(object):
    # Imported inside the class body so the decorator is reachable as a
    # class attribute without adding 'retry' to the module namespace.
    from qless import retry

    @staticmethod
    @retry(ValueError)
    def process(job):
        '''This is supposed to raise an Exception'''
        # Raises the retryable ValueError while the 'valueerror' tag is
        # present; once the tag is removed, raises a plain Exception,
        # which the retry decorator should let fail the job.
        # NOTE: TestRetry.test_docstring asserts this exact docstring.
        if 'valueerror' in job.tags:
            raise ValueError('Foo')
        else:
            raise Exception('Foo')
class TestRetry(TestQless):
    '''Test the retry decorator'''

    def test_basic(self):
        '''Ensure the retry decorator works'''
        queue = self.client.queues['foo']
        # First run raises ValueError, so the job is retried automatically.
        queue.put(Foo, {}, tags=['valueerror'], jid='jid')
        queue.pop().process()
        # Now remove the tag so the next run raises a plain Exception and
        # the job fails for real.
        self.client.jobs['jid'].untag('valueerror')
        queue.pop().process()
        self.assertEqual('failed', self.client.jobs['jid'].state)

    def test_docstring(self):
        '''Retry decorator should preserve docstring'''
        self.assertEqual('This is supposed to raise an Exception',
                         Foo.process.__doc__)
| {
"content_hash": "84e618d85b6287899066547b636abecd",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 78,
"avg_line_length": 37.45348837209303,
"alnum_prop": 0.5811859670909656,
"repo_name": "seomoz/qless-py",
"id": "f9be6467f899fdf593ee964aaef1df72ae23a9fa",
"size": "6442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "427"
},
{
"name": "Python",
"bytes": "100084"
},
{
"name": "Shell",
"bytes": "1037"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
from django.forms import widgets
from django.utils.safestring import mark_safe
from django.utils.encoding import force_text
from django.utils.html import format_html, format_html_join
def flatatt(attrs):
    """
    Pilfered from `django.forms.utils`:
    Convert a dictionary of attributes to a single string.
    The returned string will contain a leading space followed by key="value",
    XML-style pairs. Boolean values render the key alone, with no value.
    Non-boolean values are additionally formatted through the full `attrs`
    dict (``value.format(**attrs)``), which can be used to parametrize
    Angular directives. Keys are assumed not to need XML escaping. An empty
    dictionary yields an empty string. The result is passed through
    'mark_safe' (by way of 'format_html_join').
    """
    key_value_attrs = []
    boolean_attrs = []
    for attr, value in attrs.items():
        if isinstance(value, bool):
            if value:
                boolean_attrs.append((attr,))
            continue
        # Only KeyError is swallowed: a value referencing a missing attr
        # is kept verbatim, but other formatting errors still propagate.
        try:
            value = value.format(**attrs)
        except KeyError:
            pass
        key_value_attrs.append((attr, value))
    pairs = format_html_join('', ' {}="{}"', sorted(key_value_attrs))
    flags = format_html_join('', ' {}', sorted(boolean_attrs))
    return pairs + flags
#class ChoiceFieldRenderer(widgets.ChoiceFieldRenderer):
# def render(self):
# """
# Outputs a <ul ng-form="name"> for this set of choice fields to nest an ngForm.
# """
# start_tag = format_html('<ul {0}>', mark_safe(' '.join(self.field_attrs)))
# output = [start_tag]
# for widget in self:
# output.append(format_html('<li>{0}</li>', force_text(widget)))
# output.append('</ul>')
# return mark_safe('\n'.join(output))
#
#
#class CheckboxChoiceInput(widgets.CheckboxChoiceInput):
# def tag(self, attrs=None):
# attrs = attrs or self.attrs
# name = '{0}.{1}'.format(self.name, self.choice_value)
# tag_attrs = dict(attrs, type=self.input_type, name=name, value=self.choice_value)
# if 'id' in attrs:
# tag_attrs['id'] = '{0}_{1}'.format(attrs['id'], self.index)
# if 'ng-model' in attrs:
# tag_attrs['ng-model'] = "{0}['{1}']".format(attrs['ng-model'], self.choice_value)
# if self.is_checked():
# tag_attrs['checked'] = 'checked'
# return format_html('<input{0} />', flatatt(tag_attrs))
class CheckboxFieldRendererMixin(object):
    """Decorates a checkbox-group renderer with AngularJS form attributes."""

    def __init__(self, name, value, attrs, choices):
        # 'djng-error' is consumed here so it is not rendered on the tag.
        attrs.pop('djng-error', None)
        self.field_attrs = [format_html('ng-form="{0}"', name)]
        if attrs.pop('multiple_checkbox_required', False):
            # Require at least one checked box across all choice fields.
            field_names = []
            for choice, dummy in choices:
                field_names.append(format_html('{0}.{1}', name, choice))
            self.field_attrs.append(
                format_html('validate-multiple-fields="{0}"',
                            json.dumps(field_names)))
        super(CheckboxFieldRendererMixin, self).__init__(name, value, attrs,
                                                         choices)
#class CheckboxFieldRenderer(CheckboxFieldRendererMixin, ChoiceFieldRenderer):
# choice_input_class = CheckboxChoiceInput
#
#
#class CheckboxSelectMultiple(widgets.CheckboxSelectMultiple):
# """
# Form fields of type 'MultipleChoiceField' using the widget 'CheckboxSelectMultiple' must behave
# slightly different from the original. This widget overrides the default functionality.
# """
# renderer = CheckboxFieldRenderer
#
# def implode_multi_values(self, name, data):
# """
# Due to the way Angular organizes it model, when Form data is sent via a POST request,
# then for this kind of widget, the posted data must to be converted into a format suitable
# for Django's Form validation.
# """
# mkeys = [k for k in data.keys() if k.startswith(name + '.')]
# mvls = [data.pop(k)[0] for k in mkeys]
# if mvls:
# data.setlist(name, mvls)
#
# def convert_ajax_data(self, field_data):
# """
# Due to the way Angular organizes it model, when this Form data is sent using Ajax,
# then for this kind of widget, the sent data has to be converted into a format suitable
# for Django's Form validation.
# """
# return [key for key, val in field_data.items() if val]
#
# def get_field_attrs(self, field):
# return {'multiple_checkbox_required': field.required}
class RadioFieldRendererMixin(object):
    """Decorates a radio-select renderer with AngularJS validation attrs."""

    def __init__(self, name, value, attrs, choices):
        # 'djng-error' is consumed here so it is not rendered on the tag.
        attrs.pop('djng-error', None)
        self.field_attrs = []
        required = attrs.pop('radio_select_required', False)
        if required:
            self.field_attrs.append(
                format_html('validate-multiple-fields="{0}"', name))
        super(RadioFieldRendererMixin, self).__init__(name, value, attrs,
                                                      choices)
#class RadioFieldRenderer(RadioFieldRendererMixin, ChoiceFieldRenderer):
# choice_input_class = widgets.RadioChoiceInput
#
#
#class RadioSelect(widgets.RadioSelect):
# """
# Form fields of type 'ChoiceField' using the widget 'RadioSelect' must behave
# slightly different from the original. This widget overrides the default functionality.
# """
# renderer = RadioFieldRenderer
#
# def get_field_attrs(self, field):
# return {'radio_select_required': field.required}
| {
"content_hash": "eed67496a9461be8e72d7be8a4e6d7f0",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 107,
"avg_line_length": 39.785185185185185,
"alnum_prop": 0.6397318935021411,
"repo_name": "centrologic/django-codenerix",
"id": "587de619dac5ede9e7f561c3d27d734504a41b3c",
"size": "5395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codenerix/djng/widgets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "611079"
},
{
"name": "HTML",
"bytes": "113074"
},
{
"name": "JavaScript",
"bytes": "1060640"
},
{
"name": "Python",
"bytes": "482014"
}
],
"symlink_target": ""
} |
import uuid
import redis
redis_server = redis.StrictRedis(host='localhost', port=6379, db=0)
def create_code(number=200):
    """Generate `number` unique 32-character hex codes.

    Args:
        number: How many codes to generate. Defaults to 200.

    Returns:
        A list of `number` unique hex strings (uuid1 with dashes removed).
    """
    # Bug fixes vs. the original:
    # * `len(code_result) is number` compared by identity; for any
    #   number > 256 (outside CPython's small-int cache) it was always
    #   False and the loop never terminated.
    # * `while True is True` plus post-append check also looped forever
    #   for number <= 0; the condition below handles that case.
    code_result = []
    seen = set()  # O(1) membership instead of scanning the list each time
    while len(code_result) < number:
        code = str(uuid.uuid1()).replace('-', '')
        if code not in seen:
            seen.add(code)
            code_result.append(code)
    return code_result
def clean_up(prefix='showmethecode'):
    """Delete every redis key whose name starts with '<prefix>_'."""
    for key in redis_server.keys('%s_*' % prefix):
        redis_server.delete(key)
def insert_code(code, prefix='showmethecode'):
    """Store `code` in redis under the key '<prefix>_<code>'."""
    key = '%s_%s' % (prefix, code)
    redis_server.set(key, code)
def select_codes(prefix='showmethecode'):
    """Return the values of all redis keys starting with '<prefix>_'."""
    keys = redis_server.keys('%s_*' % prefix)
    return [redis_server.get(key) for key in keys]
if __name__ == '__main__':
    # Rebuild the code pool from scratch, then read back what was stored.
    clean_up()
    codes = create_code()
    for c in codes:
        insert_code(c)
    result = select_codes()
    # Parenthesized print works on both Python 2 and 3 (the original
    # `print result` statement is a syntax error under Python 3).
    print(result)
| {
"content_hash": "b7095932277218f1e2239a62ae176702",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 67,
"avg_line_length": 25.153846153846153,
"alnum_prop": 0.6095820591233435,
"repo_name": "zhangmianhongni/MyPractice",
"id": "d8490c6be2a000ae16531d905f434dfe5b6aa51d",
"size": "1125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Python 练习册(show-me-the-code)/0003/0003.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "22329"
},
{
"name": "C++",
"bytes": "781"
},
{
"name": "DIGITAL Command Language",
"bytes": "456078"
},
{
"name": "HTML",
"bytes": "1703"
},
{
"name": "Java",
"bytes": "883436"
},
{
"name": "Jupyter Notebook",
"bytes": "1063584"
},
{
"name": "Python",
"bytes": "93136"
},
{
"name": "XSLT",
"bytes": "1400"
}
],
"symlink_target": ""
} |
from uuid import uuid4
from changes.config import db
from changes.constants import Result, Status
from changes.models.filecoverage import FileCoverage
from changes.testutils import APITestCase
class BuildCoverageTest(APITestCase):
    """Exercises the per-build coverage API endpoint."""

    def test_error(self):
        """An unknown build id should produce a 404."""
        missing_build_id = uuid4()
        path = '/api/0/builds/{0}/coverage/'.format(missing_build_id.hex)
        resp = self.client.get(path)
        assert resp.status_code == 404

    def test_merging(self):
        """Coverage contributed by several jobs is merged per file."""
        project = self.create_project()
        build = self.create_build(
            project, status=Status.finished, result=Result.passed)
        # One build with two jobs.
        job_a = self.create_job(build)
        step_a = self.create_jobstep(self.create_jobphase(job_a))
        job_b = self.create_job(build)
        step_b = self.create_jobstep(self.create_jobphase(job_b))
        # Two jobs contribute to coverage for foo.py.
        fixtures = [
            (step_a, job_a, "foo.py", "NNUC"),
            (step_a, job_a, "bar.py", "CNNU"),
            (step_b, job_b, "foo.py", "NUCN"),
        ]
        for step, job, filename, cov in fixtures:
            db.session.add(FileCoverage(
                step_id=step.id,
                job_id=job.id,
                project_id=project.id,
                lines_covered=1,
                lines_uncovered=1,
                filename=filename,
                data=cov,
            ))
        db.session.commit()
        path = '/api/0/builds/{0}/coverage/'.format(build.id.hex)
        resp = self.client.get(path)
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert data == {
            "foo.py": "NUCC",  # Merged.
            "bar.py": "CNNU",
        }
| {
"content_hash": "525969e68878a74b12cc7794c7d109c6",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 70,
"avg_line_length": 30.073529411764707,
"alnum_prop": 0.5550122249388753,
"repo_name": "dropbox/changes",
"id": "3b44c4f444e4bc15ab6a6d158467ab546c89c3dd",
"size": "2045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/changes/api/test_build_coverage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24837"
},
{
"name": "HTML",
"bytes": "21274"
},
{
"name": "JavaScript",
"bytes": "380548"
},
{
"name": "Makefile",
"bytes": "6148"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2189624"
},
{
"name": "Shell",
"bytes": "4150"
}
],
"symlink_target": ""
} |
'''
Written by Lijun An and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import numpy as np
import pandas as pd
from copy import deepcopy
from utils.misc import create_folder, load_pkl, save_pkl
def pick_well_matched_pairs(args,
                            step4_output_path,
                            step5_output_path,
                            name1='MACC',
                            name2='ADNI'):
    """
    Pick time points with well matched MMSE.

    Reads step4's matched.csv and, for every matched subject pair, compares
    the MMSE scores of each matched visit. Visits whose MMSE difference
    exceeds ``args.mmse_eps`` — or where either MMSE is missing — are
    blanked to NaN. The table is then left-compacted and rows/columns that
    became entirely empty are dropped, and the result is written to
    picked_matched.csv under the step5 threshold folder.

    Args:
        args (tuple): Parameters
        step4_output_path (str): Path for step4 output
        step5_output_path (str): Path for step5 output
        name1 (str, optional): Name for dataset1. Defaults to 'MACC'.
        name2 (str, optional): Name for dataset2. Defaults to 'ADNI'.
    """
    step4_threshold_path = os.path.join(step4_output_path,
                                        'Threshold_' + str(args.threshold))
    step5_threshold_path = os.path.join(step5_output_path,
                                        'Threshold_' + str(args.threshold))
    create_folder(step5_threshold_path)
    # read matched_df
    matched_df = pd.read_csv(os.path.join(step4_threshold_path, 'matched.csv'))
    raw_dataset1 = pd.read_csv(os.path.join(args.MACC_data_path))
    raw_dataset2 = pd.read_csv(os.path.join(args.ADNI_data_path))
    subs_dataset1 = np.unique(matched_df.MACC_RID)
    # Visit-date columns are named '<dataset>_<visit>'; only the first
    # args.threshold visits participate in the matching.
    cols_dataset1 = [
        'MACC_1', 'MACC_2', 'MACC_3', 'MACC_4', 'MACC_5', 'MACC_6'
    ]
    cols_dataset1 = cols_dataset1[:args.threshold]
    cols_dataset2 = [
        'ADNI_1', 'ADNI_2', 'ADNI_3', 'ADNI_4', 'ADNI_5', 'ADNI_6'
    ]
    cols_dataset2 = cols_dataset2[:args.threshold]
    # pick well matched time points
    for sub_dataset1 in subs_dataset1:
        # get matched subject pair
        matched_row = matched_df[(matched_df.MACC_RID == sub_dataset1)]
        # NaNs stringify to 'nan' and are filtered out, leaving only the
        # actually matched visit dates.
        dates_dataset1 = matched_row[cols_dataset1].values.astype(str)
        dates_dataset1 = dates_dataset1[dates_dataset1 != str(np.nan)]
        sub_dataset2 = matched_row['ADNI_RID'].values[0]
        dates_dataset2 = matched_row[cols_dataset2].values.astype(str)
        dates_dataset2 = dates_dataset2[dates_dataset2 != str(np.nan)]
        assert len(dates_dataset1) == len(
            dates_dataset2), 'Ineqaul matched TPs'
        # Test whether the MMSE difference is larger than args.mmse_eps
        nb_visits = len(dates_dataset1)
        for i in range(nb_visits):
            date_dataset1 = dates_dataset1[i]
            mask_dataset1 = (raw_dataset1.RID == sub_dataset1) & (
                raw_dataset1.EXAMDATE == date_dataset1)
            # MMSE may be missing for a visit; treated as NaN below.
            if raw_dataset1[mask_dataset1]['MMSE'].values.shape[0] >= 1:
                mmse_dataset1 = raw_dataset1[mask_dataset1]['MMSE'].values[0]
            else:
                mmse_dataset1 = np.nan
            date_dataset2 = dates_dataset2[i]
            mask_dataset2 = (raw_dataset2.RID == sub_dataset2) & (
                raw_dataset2.EXAMDATE == date_dataset2)
            if raw_dataset2[mask_dataset2]['MMSE'].values.shape[0] >= 1:
                mmse_dataset2 = raw_dataset2[mask_dataset2]['MMSE'].values[0]
            else:
                mmse_dataset2 = np.nan
            # check whether the MMSE difference is within args.mmse_eps;
            # column i + 1 holds the i-th dataset-1 date and column
            # i + 2 + args.threshold its paired dataset-2 date.
            if not np.isnan(mmse_dataset1) and not np.isnan(mmse_dataset2):
                if abs(mmse_dataset1 - mmse_dataset2) > args.mmse_eps:
                    # replace matched date with NaN
                    matched_df.iloc[matched_df[(
                        matched_df.MACC_RID == sub_dataset1)].index, i +
                                    1] = np.nan
                    matched_df.iloc[matched_df[(
                        matched_df.MACC_RID == sub_dataset1)].index, i + 2 +
                                    args.threshold] = np.nan
            else:
                # Either MMSE is missing: drop this visit pair as well.
                matched_df.iloc[matched_df[(matched_df.MACC_RID == sub_dataset1
                                            )].index, i + 1] = np.nan
                matched_df.iloc[matched_df[(
                    matched_df.MACC_RID == sub_dataset1)].index, i + 2 +
                                args.threshold] = np.nan
    # we need to clean matched_df
    matched_df = clean_dropped_matched(matched_df, args.threshold)
    matched_df = rm_empty_rows(matched_df, cols_dataset1)
    matched_df = rm_empty_cols(matched_df, cols_dataset1, cols_dataset2)
    # save the matched_df
    matched_df.to_csv(
        os.path.join(step5_threshold_path, 'picked_matched.csv'),
        sep=',',
        index=False)
def clean_dropped_matched(matched_df, threshold):
    """
    Compact each row of the matched table after time points were dropped.

    Dropping a badly matched time point (pick_well_matched_pairs blanks it
    to NaN) leaves gaps; this shifts the remaining dates left so all
    matched visits occupy the leading columns. Per-row column layout is
    [RID1, threshold dataset-1 dates, RID2, threshold dataset-2 dates], so
    the dataset-2 date paired with dataset-1 column ``k + 1`` sits at
    column ``k + 2 + threshold``.

    Args:
        matched_df (class DataFrame): Matched dataframe
        threshold (int): Number of matched time-point columns per dataset

    Returns:
        class DataFrame: The same dataframe, compacted in place.
    """
    for i in range(len(matched_df)):
        for k in range(threshold):
            macc_date = matched_df.iloc[i, k + 1]
            # NaN is a float; a string means the slot is already occupied.
            if isinstance(macc_date, float):
                for j in range(k + 1, threshold):
                    if isinstance(matched_df.iloc[i, j + 1], str):
                        matched_df.iloc[i, k + 1] = matched_df.iloc[i, j + 1]
                        matched_df.iloc[i, j + 1] = np.nan
                        matched_df.iloc[i, k + 2 + threshold] = \
                            matched_df.iloc[i, j + 2 + threshold]
                        matched_df.iloc[i, j + 2 + threshold] = np.nan
                        # Bug fix: stop once slot k is filled. The original
                        # 'continue' kept scanning, so every later date
                        # overwrote slot k and earlier dates were lost
                        # (e.g. [nan,'a','b'] became ['b',nan,nan]).
                        break
    return matched_df
def rm_empty_rows(matched_df, cols_dataset1):
    """
    Remove rows where all dataset-1 time points are poorly matched (NaN).

    Args:
        matched_df (class DataFrame): Matched dataframe
        cols_dataset1 (list): Date columns belonging to dataset1

    Returns:
        class DataFrame: A copy of ``matched_df`` without the empty rows.
    """
    result = deepcopy(matched_df)
    for pos in range(len(matched_df)):
        # NaNs stringify to 'nan'; the row is empty when nothing else is left.
        entries = matched_df.iloc[pos][cols_dataset1].values.astype(str)
        if not (entries != str(np.nan)).any():
            result.drop(pos, inplace=True)
    return result
def rm_empty_cols(matched_df, cols_dataset1, cols_dataset2):
    """
    Remove paired columns whose dataset-1 side is entirely NaN.

    Only the dataset-1 columns are inspected; whenever column ``i`` of
    dataset1 is empty, the corresponding dataset-2 column is removed as
    well so that the visit pairing stays aligned.

    Args:
        matched_df (class DataFrame): Matched dataframe
        cols_dataset1 (list): Date columns for dataset1
        cols_dataset2 (list): Date columns for dataset2

    Returns:
        class DataFrame: A copy of ``matched_df`` without empty column pairs.
    """
    result = deepcopy(matched_df)
    to_drop = []
    for col1, col2 in zip(cols_dataset1, cols_dataset2):
        values = matched_df[col1].values.astype(str)
        if not (values != str(np.nan)).any():
            to_drop.append(col1)
            to_drop.append(col2)
    result.drop(to_drop, axis=1, inplace=True)
    return result
def split_matched_csv(args, bin, step5_output_path, name1='MACC',
                      name2='ADNI'):
    """
    Split the matched csv file into MACC(AIBL) and ADNI halves.

    Reads picked_matched.csv, writes one per-dataset csv with a shared
    'RID', '1', '2', ... schema, and — when more pairs were matched than
    ``args.match_ratio`` of the bin's subjects allows — drops the
    highest-cost pairs first.

    Args:
        args (tuple): Parameters
        bin (int): Bin
        step5_output_path (str): Path for step5 output
        name1 (str, optional): Name for dataset1. Defaults to 'MACC'.
        name2 (str, optional): Name for dataset2. Defaults to 'ADNI'.

    Returns:
        tuple: (unique matched RIDs of dataset1, unique matched RIDs of
        dataset2) after the optional cost-based trimming.
    """
    output_path = os.path.join(step5_output_path,
                               'Threshold_' + str(args.threshold))
    # read matched csv path
    matched_df = pd.read_csv(os.path.join(output_path, 'picked_matched.csv'))
    # Half the columns belong to each dataset, plus one RID column each.
    nb_tps = int(matched_df.shape[1] / 2 - 1)
    # MACC (AIBL)
    cols_dataset1 = []
    cols_dataset1.append('MACC_RID')
    for t in range(1, nb_tps + 1):
        tp_name = name1 + '_' + str(t)
        cols_dataset1.append(tp_name)
    matched_dataset1 = pd.read_csv(
        os.path.join(output_path, 'picked_matched.csv'), usecols=cols_dataset1)
    # Rename to the shared 'RID', '1', '2', ... schema.
    new_cols_dataset1 = []
    new_cols_dataset1.append('RID')
    for t in range(1, nb_tps + 1):
        tp_name = str(t)
        new_cols_dataset1.append(tp_name)
    matched_dataset1.columns = new_cols_dataset1
    # ADNI
    cols_dataset2 = []
    cols_dataset2.append('ADNI_RID')
    for t in range(1, nb_tps + 1):
        tp_name = name2 + '_' + str(t)
        cols_dataset2.append(tp_name)
    matched_dataset2 = pd.read_csv(
        os.path.join(output_path, 'picked_matched.csv'), usecols=cols_dataset2)
    new_cols_dataset2 = []
    new_cols_dataset2.append('RID')
    for t in range(1, nb_tps + 1):
        tp_name = str(t)
        new_cols_dataset2.append(tp_name)
    matched_dataset2.columns = new_cols_dataset2
    # we need to control #matched subjects
    raw_dataset1 = pd.read_csv(
        os.path.join(args.checkpoint_path, args.matching_pair,
                     'matching_' + str(args.nb_bins) + 'BINs',
                     'BIN_' + str(bin), 'MACC_' + str(bin) + '_bin.csv'))
    raw_dataset2 = pd.read_csv(args.ADNI_data_path)
    nb_bin_subjects = len(np.unique(raw_dataset1.RID))
    # Keep at most args.match_ratio of the bin's subjects as matches.
    if matched_dataset1.shape[0] <= round(args.match_ratio * nb_bin_subjects):
        matched_dataset1.to_csv(
            os.path.join(output_path, 'picked_' + name1 + '.csv'),
            sep=',',
            index=False)
        matched_dataset2.to_csv(
            os.path.join(output_path, 'picked_' + name2 + '.csv'),
            sep=',',
            index=False)
        matched_subs_dataset1 = np.unique(matched_dataset1.RID)
        matched_subs_dataset2 = np.unique(matched_dataset2.RID)
    else:
        # we need to drop some subjects: rank pairs by matching cost
        # (ascending) and remove everything beyond the allowed ratio.
        matched_cost = gen_matched_subjects_cost_table(
            args, raw_dataset1, raw_dataset2, matched_dataset1,
            matched_dataset2, output_path)
        rm_macc_subs = matched_cost.iloc[int(args.match_ratio *
                                             nb_bin_subjects):, 0].values
        rm_adni_subs = matched_cost.iloc[int(args.match_ratio *
                                             nb_bin_subjects):, 1].values
        rm_high_cost_pairs(
            rm_macc_subs, matched_dataset1,
            os.path.join(output_path, 'picked_' + name1 + '.csv'))
        rm_high_cost_pairs(
            rm_adni_subs, matched_dataset2,
            os.path.join(output_path, 'picked_' + name2 + '.csv'))
        # Re-read the trimmed csvs to collect the surviving RIDs.
        dataset1 = pd.read_csv(
            os.path.join(output_path, 'picked_' + name1 + '.csv'))
        dataset2 = pd.read_csv(
            os.path.join(output_path, 'picked_' + name2 + '.csv'))
        matched_subs_dataset1 = np.unique(dataset1.RID)
        matched_subs_dataset2 = np.unique(dataset2.RID)
    return matched_subs_dataset1, matched_subs_dataset2
def gen_matched_subjects_cost_table(args, raw_dataset1, raw_dataset2,
                                    picked_MACC, picked_ADNI, output_path):
    """
    Generate a cost table for matched subjects to drop high cost pairs.

    For every matched subject pair the per-visit matching cost (age, sex,
    diagnosis, MMSE) is averaged via ``matching_cost``; the table is sorted
    ascending by cost (best matches first) and written to ``cost.csv``.

    Args:
        args (tuple): Parameters
        raw_dataset1 (class DataFrame): Dataframe for raw dataset1
        raw_dataset2 (class DataFrame): Dataframe for raw dataset2
        picked_MACC (class DataFrame): Dataframe for picked dataset1
        picked_ADNI (class DataFrame): Dataframe for picked dataset2
        output_path (str): Path for saving output

    Returns:
        class DataFrame: columns MACC_RID/ADNI_RID/Cost/MMSE_cost,
        sorted ascending by Cost.
    """
    matched_macc_subjects = np.unique(picked_MACC.RID)
    cost_rows = []
    for macc_sub in matched_macc_subjects:
        # get matched MACC dates and the paired ADNI subject and dates;
        # NaNs stringify to 'nan' and are filtered out.
        sub_mask = (picked_MACC.RID == macc_sub)
        macc_dates = picked_MACC.iloc[picked_MACC[sub_mask].
                                      index, 1:].values[0]
        macc_dates = macc_dates.astype(str)
        macc_dates = macc_dates[macc_dates != str(np.nan)]
        adni_sub = picked_ADNI.iloc[picked_MACC[sub_mask].index, 0].values[0]
        adni_dates = picked_ADNI.iloc[picked_MACC[sub_mask].
                                      index, 1:].values[0]
        adni_dates = adni_dates.astype(str)
        adni_dates = adni_dates[adni_dates != str(np.nan)]
        # Bug fix: the original asserted len(adni_dates) == len(adni_dates)
        # (a self-comparison, always true), so misaligned visit pairings
        # were never detected.
        assert len(macc_dates) == len(adni_dates), 'Wrong matching case'
        cost, mmse_cost = matching_cost(args, raw_dataset1, raw_dataset2,
                                        macc_sub, macc_dates, adni_sub,
                                        adni_dates)
        cost_rows.append([macc_sub, adni_sub, cost, mmse_cost])
    # generate dataframe
    matched_cost = pd.DataFrame(
        data=cost_rows, columns=['MACC_RID', 'ADNI_RID', 'Cost', 'MMSE_cost'])
    # sort according to cost (ascending: cheapest matches first)
    matched_cost.sort_values(
        by=['Cost'], inplace=True, ascending=True, ignore_index=True)
    matched_cost.to_csv(
        os.path.join(output_path, 'cost.csv'), sep=',', index=False)
    return matched_cost
def matching_cost(args, raw_dataset1, raw_dataset2, macc_sub, macc_dates,
                  adni_sub, adni_dates):
    """
    Calculate matching cost for a matched subject pair.

    Sums per-visit costs over age, sex, diagnosis and MMSE using
    matching_cost_one_measure. The MMSE component is accumulated
    separately and then added into the total, so the total cost includes
    MMSE while the MMSE cost is also reported on its own.

    Args:
        args (tuple): Parameters (provides the per-measure penalties and
            the NANpenalty for missing values)
        raw_dataset1 (class DataFrame): Dataframe for raw dataset1
        raw_dataset2 (class DataFrame): Dataframe for raw dataset2
        macc_sub: MACC subject RID
        macc_dates (list): List of MACC visit dates
        adni_sub: ADNI subject RID
        adni_dates (list): List of ADNI visit dates (same length/order as
            macc_dates)

    Returns:
        tuple: (average total cost per visit, average MMSE cost per visit)
    """
    cost = 0
    mmse_cost = 0
    nb_dates = len(macc_dates)
    for i, macc_date in enumerate(macc_dates):
        # Locate the two visits being compared (one row per RID + date).
        macc_mask = (raw_dataset1.RID == macc_sub) & (
            raw_dataset1.EXAMDATE == macc_date)
        adni_mask = (raw_dataset2.RID == adni_sub) & (
            raw_dataset2.EXAMDATE == adni_dates[i])
        # cost for age
        macc_age = raw_dataset1.loc[macc_mask, ['AGE']].values[0][0]
        adni_age = raw_dataset2.loc[adni_mask, ['AGE']].values[0][0]
        cost += matching_cost_one_measure(args.age_penalty, args.NANpenalty,
                                          macc_age, adni_age)
        # cost for sex
        macc_sex = raw_dataset1.loc[macc_mask, ['SEX']].values[0][0]
        adni_sex = raw_dataset2.loc[adni_mask, ['SEX']].values[0][0]
        cost += matching_cost_one_measure(args.sex_penalty, args.NANpenalty,
                                          macc_sex, adni_sex)
        # cost for dx
        macc_dx = raw_dataset1.loc[macc_mask, ['DX']].values[0][0]
        adni_dx = raw_dataset2.loc[adni_mask, ['DX']].values[0][0]
        cost += matching_cost_one_measure(args.dx_penalty, args.NANpenalty,
                                          macc_dx, adni_dx)
        # cost for mmse (kept separate so callers can rank by MMSE alone)
        macc_mmse = raw_dataset1.loc[macc_mask, ['MMSE']].values[0][0]
        adni_mmse = raw_dataset2.loc[adni_mask, ['MMSE']].values[0][0]
        mmse_cost += matching_cost_one_measure(
            args.mmse_penalty, args.NANpenalty, macc_mmse, adni_mmse)
    # Fold the MMSE component into the total as well.
    cost += mmse_cost
    return cost / nb_dates, mmse_cost / nb_dates
def matching_cost_one_measure(penalty, NANpenalty, macc, adni):
    """
    Calculate matching cost for one measure at one matched time point.

    Args:
        penalty (int): Weight penalizing the |ADNI - MACC| difference
        NANpenalty (int): Flat penalty applied when either value is missing
        macc (float): Measure value for the MACC subject
        adni (float): Measure value for the ADNI subject

    Returns:
        float: NANpenalty if either value is NaN,
            otherwise penalty * |adni - macc|
    """
    # A single missing value on either side incurs the flat NaN penalty.
    if np.isnan(macc) or np.isnan(adni):
        return NANpenalty
    return penalty * np.abs(adni - macc)
def rm_high_cost_pairs(rm_subs, picked_df, save_path):
    """
    Remove matched pairs with relatively high cost and save the result.

    Drops the first row of each subject listed in ``rm_subs``, then drops
    any column left with no non-NaN values, and writes the result as CSV.

    Args:
        rm_subs (list): List of subjects with high costs
        picked_df (class DataFrame): Picked matched dataframe
        save_path (str): Path for saving output
    """
    # first matching row index for each subject to remove
    drop_rows = [
        picked_df[picked_df.RID == sub].index.values[0] for sub in rm_subs
    ]
    trimmed_df = picked_df.drop(drop_rows, axis=0)
    trimmed_df.reset_index(drop=True, inplace=True)
    # removing rows may leave columns with nothing but NaN; detect them by
    # stringifying the values and discarding the 'nan' tokens
    result_df = deepcopy(trimmed_df)
    nan_token = str(np.nan)
    empty_cols = []
    for col in trimmed_df.columns:
        as_text = trimmed_df[col].values.astype(str)
        if as_text[as_text != nan_token].shape[0] == 0:
            empty_cols.append(col)
    result_df.drop(empty_cols, axis=1, inplace=True)
    # save
    result_df.to_csv(save_path, sep=',', index=False)
def update_next_round_data(args,
                           bin,
                           matched_subs_dataset1,
                           matched_subs_dataset2,
                           name1='MACC',
                           name2='ADNI'):
    """
    Remove the matched MACC subjects and ADNI subjects for the next round.

    Loads the current round's candidate pools, deletes the subjects matched
    in this round, and saves the reduced pools into the next round's folder.

    Args:
        args (tuple): Parameters
        bin (int): Bin index (parameter name kept for caller compatibility
            although it shadows the builtin ``bin``)
        matched_subs_dataset1 (list): List of matched subject RIDs of dataset1
        matched_subs_dataset2 (list): List of matched subject RIDs of dataset2
        name1 (str, optional): Name for dataset1. Defaults to 'MACC'.
        name2 (str, optional): Name for dataset2. Defaults to 'ADNI'.
    """
    bin_path = os.path.join(args.checkpoint_path, args.matching_pair,
                            'matching_' + str(args.nb_bins) + 'BINs',
                            'BIN_' + str(bin))
    curr_round_data_path = os.path.join(bin_path, 'round_' + str(args.round))
    next_round_data_path = os.path.join(bin_path,
                                        'round_' + str(args.round + 1))
    create_folder(next_round_data_path)
    # File-name suffix encoding the penalty configuration.  The original
    # code built this with a missing '+' that only worked through implicit
    # string-literal concatenation; an explicit single suffix is clearer.
    suffix = ('_AGE' + str(args.age_penalty) + '_SEX' + str(args.sex_penalty) +
              '_DX' + str(args.dx_penalty) + '_MMSE' + str(args.mmse_penalty) +
              '.pkl')
    save_name1 = name1 + suffix
    save_name2 = name2 + suffix
    # In the very first round/bin the pools are stored under their plain
    # dataset names; afterwards they carry the penalty-encoded suffix.
    if args.round == 1 and bin == 0:
        load_name1, load_name2 = name1 + '.pkl', name2 + '.pkl'
    elif args.round == 1 and bin > 0:
        load_name1, load_name2 = name1 + '.pkl', save_name2
    else:
        load_name1, load_name2 = save_name1, save_name2
    comb_dataset1 = load_pkl(os.path.join(curr_round_data_path, load_name1))
    comb_dataset2 = load_pkl(os.path.join(curr_round_data_path, load_name2))
    # Iterate over snapshots of the key views so deleting while looping is
    # safe (cheaper than the previous deepcopy of the whole dicts).
    for sub_dataset1 in list(comb_dataset1.keys()):
        if sub_dataset1 in matched_subs_dataset1:
            del comb_dataset1[sub_dataset1]
    for sub_dataset2 in list(comb_dataset2.keys()):
        if sub_dataset2 in matched_subs_dataset2:
            del comb_dataset2[sub_dataset2]
    # save updated within dataset combs
    save_pkl(comb_dataset1, os.path.join(next_round_data_path, save_name1))
    save_pkl(comb_dataset2, os.path.join(next_round_data_path, save_name2))
| {
"content_hash": "18c36681dfc52d0f767d77dd93ffe516",
"timestamp": "",
"source": "github",
"line_count": 477,
"max_line_length": 79,
"avg_line_length": 40.486373165618446,
"alnum_prop": 0.5759631317315659,
"repo_name": "ThomasYeoLab/CBIG",
"id": "8bcf4d72205e778ffb998458edd5054f083988b2",
"size": "19361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stable_projects/predict_phenotypes/An2022_gcVAE/matching/step5_pick_well_matched_pairs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35378"
},
{
"name": "C",
"bytes": "2076236"
},
{
"name": "C++",
"bytes": "1461097"
},
{
"name": "CSS",
"bytes": "6852"
},
{
"name": "Fortran",
"bytes": "598090"
},
{
"name": "HTML",
"bytes": "287918"
},
{
"name": "Jupyter Notebook",
"bytes": "569200"
},
{
"name": "MATLAB",
"bytes": "10013692"
},
{
"name": "Makefile",
"bytes": "7902"
},
{
"name": "Objective-C",
"bytes": "77"
},
{
"name": "PostScript",
"bytes": "8416"
},
{
"name": "Python",
"bytes": "2499129"
},
{
"name": "R",
"bytes": "33929"
},
{
"name": "Shell",
"bytes": "1923688"
},
{
"name": "TeX",
"bytes": "8993"
},
{
"name": "Vim Script",
"bytes": "2859"
},
{
"name": "XSLT",
"bytes": "19506"
}
],
"symlink_target": ""
} |
# Name of the per-stage settings module to load; change to e.g. "production"
# when deploying.
DEPLOYMENT_STAGE = "development"
# Import the stage-specific settings module by name and merge every name it
# defines into this module's namespace, so importers of this settings module
# see the stage's values.
# NOTE(review): __import__ without a fromlist returns the top-level module;
# that is fine here only while DEPLOYMENT_STAGE contains no dots — confirm
# if dotted stage names are ever introduced.
stage_settings = __import__( DEPLOYMENT_STAGE, globals(), locals())
globals().update(stage_settings.__dict__)
| {
"content_hash": "8c0c6e2faad3979f85ab2cd89d6aa3ea",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 67,
"avg_line_length": 47.666666666666664,
"alnum_prop": 0.7202797202797203,
"repo_name": "HacktivateOrg/poll",
"id": "2111152febdb4ff4219a33963ee6aba1142f9339",
"size": "143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poll/deployment/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3381"
}
],
"symlink_target": ""
} |
"""Support for collections of mapped entities.
The collections package supplies the machinery used to inform the ORM of
collection membership changes. An instrumentation via decoration approach is
used, allowing arbitrary types (including built-ins) to be used as entity
collections without requiring inheritance from a base class.
Instrumentation decoration relays membership change events to the
:class:`.CollectionAttributeImpl` that is currently managing the collection.
The decorators observe function call arguments and return values, tracking
entities entering or leaving the collection. Two decorator approaches are
provided. One is a bundle of generic decorators that map function arguments
and return values to events::
from sqlalchemy.orm.collections import collection
class MyClass(object):
# ...
@collection.adds(1)
def store(self, item):
self.data.append(item)
@collection.removes_return()
def pop(self):
return self.data.pop()
The second approach is a bundle of targeted decorators that wrap appropriate
append and remove notifiers around the mutation methods present in the
standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
specified in terms of generic decorator recipes, but are instead hand-tooled
for increased efficiency. The targeted decorators occasionally implement
adapter-like behavior, such as mapping bulk-set methods (``extend``,
``update``, ``__setslice__``, etc.) into the series of atomic mutation events
that the ORM requires.
The targeted decorators are used internally for automatic instrumentation of
entity collection classes. Every collection class goes through a
transformation process roughly like so:
1. If the class is a built-in, substitute a trivial sub-class
2. Is this class already instrumented?
3. Add in generic decorators
4. Sniff out the collection interface through duck-typing
5. Add targeted decoration to any undecorated interface method
This process modifies the class at runtime, decorating methods and adding some
bookkeeping properties. This isn't possible (or desirable) for built-in
classes like ``list``, so trivial sub-classes are substituted to hold
decoration::
class InstrumentedList(list):
pass
Collection classes can be specified in ``relationship(collection_class=)`` as
types or a function that returns an instance. Collection classes are
inspected and instrumented during the mapper compilation phase. The
collection_class callable will be executed once to produce a specimen
instance, and the type of that specimen will be instrumented. Functions that
return built-in types like ``lists`` will be adapted to produce instrumented
instances.
When extending a known type like ``list``, additional decorations are
generally not needed. Odds are, the extension method will delegate to a
method that's already instrumented. For example::
class QueueIsh(list):
def push(self, item):
self.append(item)
def shift(self):
return self.pop(0)
There's no need to decorate these methods. ``append`` and ``pop`` are already
instrumented as part of the ``list`` interface. Decorating them would fire
duplicate events, which should be avoided.
The targeted decoration tries not to rely on other methods in the underlying
collection class, but some are unavoidable. Many depend on 'read' methods
being present to properly instrument a 'write', for example, ``__setitem__``
needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
be reimplemented in terms of atomic appends and removes, so the ``extend``
decoration will actually perform many ``append`` operations and not call the
underlying method at all.
Tight control over bulk operation and the firing of events is also possible by
implementing the instrumentation internally in your methods. The basic
instrumentation package works under the general assumption that collection
mutation will not raise unusual exceptions. If you want to closely
orchestrate append and remove events with exception management, internal
instrumentation may be the answer. Within your method,
``collection_adapter(self)`` will retrieve an object that you can use for
explicit control over triggering append and remove events.
The owning object and :class:`.CollectionAttributeImpl` are also reachable
through the adapter, allowing for some very sophisticated behavior.
"""
import inspect
import operator
import weakref
from ..sql import expression
from .. import util, exc as sa_exc
from . import base
from sqlalchemy.util.compat import inspect_getargspec
# Public API of this module.
__all__ = ['collection', 'collection_adapter',
           'mapped_collection', 'column_mapped_collection',
           'attribute_mapped_collection']

# Serializes class instrumentation in prepare_instrumentation(); a class is
# instrumented at most once per process.
__instrumentation_mutex = util.threading.Lock()
class _PlainColumnGetter(object):
"""Plain column getter, stores collection of Column objects
directly.
Serializes to a :class:`._SerializableColumnGetterV2`
which has more expensive __call__() performance
and some rare caveats.
"""
def __init__(self, cols):
self.cols = cols
self.composite = len(cols) > 1
def __reduce__(self):
return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
def _cols(self, mapper):
return self.cols
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(state, state.dict, col)
for col in self._cols(m)
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetter(object):
"""Column-based getter used in version 0.7.6 only.
Remains here for pickle compatibility with 0.7.6.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return _SerializableColumnGetter, (self.colkeys,)
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [m._get_state_attr_by_column(
state, state.dict,
m.mapped_table.columns[k])
for k in self.colkeys]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetterV2(_PlainColumnGetter):
    """Updated serializable getter which deals with
    multi-table mapped classes.

    Two extremely unusual cases are not supported: mappings whose tables
    span multiple MetaData objects, and mappings to non-Table selectables
    linked across inheriting mappers.

    """

    def __init__(self, colkeys):
        self.colkeys = colkeys
        self.composite = len(colkeys) > 1

    def __reduce__(self):
        return self.__class__, (self.colkeys,)

    @classmethod
    def _reduce_from_cols(cls, cols):
        def _table_key(c):
            # only plain Table-bound columns carry a table key; other
            # selectables serialize their table as None
            if not isinstance(c.table, expression.TableClause):
                return None
            return c.table.key

        colkeys = [(c.key, _table_key(c)) for c in cols]
        return _SerializableColumnGetterV2, (colkeys,)

    def _cols(self, mapper):
        # re-resolve (column key, table key) pairs against the live mapper
        cols = []
        metadata = getattr(mapper.local_table, 'metadata', None)
        for colkey, tablekey in self.colkeys:
            if tablekey is None or \
                    metadata is None or \
                    tablekey not in metadata:
                cols.append(mapper.local_table.c[colkey])
            else:
                cols.append(metadata.tables[tablekey].c[colkey])
        return cols
def column_mapped_collection(mapping_spec):
    """A dictionary-based collection type with column-based keying.

    Returns a :class:`.MappedCollection` factory whose keying function is
    generated from ``mapping_spec``, which may be a Column or a sequence
    of Columns.

    The key value must be immutable for the lifetime of the object.  You
    can not, for example, map on foreign key values if those key values will
    change during the session, i.e. from None to a database-assigned integer
    after a session flush.

    """
    cols = [
        expression._only_column_elements(spec, "mapping_spec")
        for spec in util.to_list(mapping_spec)
    ]
    keyfunc = _PlainColumnGetter(cols)

    def factory():
        return MappedCollection(keyfunc)
    return factory
class _SerializableAttrGetter(object):
def __init__(self, name):
self.name = name
self.getter = operator.attrgetter(name)
def __call__(self, target):
return self.getter(target)
def __reduce__(self):
return _SerializableAttrGetter, (self.name, )
def attribute_mapped_collection(attr_name):
    """A dictionary-based collection type with attribute-based keying.

    Returns a :class:`.MappedCollection` factory keyed on the ``attr_name``
    attribute of entities in the collection, where ``attr_name`` is the
    string name of the attribute.

    The key value must be immutable for the lifetime of the object.  You
    can not, for example, map on foreign key values if those key values will
    change during the session, i.e. from None to a database-assigned integer
    after a session flush.

    """
    getter = _SerializableAttrGetter(attr_name)

    def factory():
        return MappedCollection(getter)
    return factory
def mapped_collection(keyfunc):
    """A dictionary-based collection type with arbitrary keying.

    Returns a :class:`.MappedCollection` factory whose keying function is
    ``keyfunc``, a callable that takes an entity and returns a key value.

    The key value must be immutable for the lifetime of the object.  You
    can not, for example, map on foreign key values if those key values will
    change during the session, i.e. from None to a database-assigned integer
    after a session flush.

    """
    def factory():
        return MappedCollection(keyfunc)
    return factory
class collection(object):
    """Decorators for entity collection classes.

    The decorators fall into two groups: annotations and interception recipes.

    The annotating decorators (appender, remover, iterator, linker, converter,
    internally_instrumented) indicate the method's purpose and take no
    arguments.  They are not written with parens::

        @collection.appender
        def append(self, append): ...

    The recipe decorators all require parens, even those that take no
    arguments::

        @collection.adds('entity')
        def insert(self, position, entity): ...

        @collection.removes_return()
        def popitem(self): ...

    """
    # Bundled as a class solely for ease of use: packaging, doc strings,
    # importability.

    @staticmethod
    def appender(fn):
        """Tag the method as the collection appender.

        The appender method is called with one positional argument: the value
        to append. The method will be automatically decorated with 'adds(1)'
        if not already decorated::

            @collection.appender
            def add(self, append): ...

            # or, equivalently
            @collection.appender
            @collection.adds(1)
            def add(self, append): ...

            # for mapping type, an 'append' may kick out a previous value
            # that occupies that slot.  consider d['a'] = 'foo'- any previous
            # value in d['a'] is discarded.
            @collection.appender
            @collection.replaces(1)
            def add(self, entity):
                key = some_key_func(entity)
                previous = None
                if key in self:
                    previous = self[key]
                self[key] = entity
                return previous

        If the value to append is not allowed in the collection, you may
        raise an exception.  Something to remember is that the appender
        will be called for each object mapped by a database query.  If the
        database contains rows that violate your collection semantics, you
        will need to get creative to fix the problem, as access via the
        collection will not work.

        If the appender method is internally instrumented, you must also
        receive the keyword argument '_sa_initiator' and ensure its
        promulgation to collection events.

        """
        fn._sa_instrument_role = 'appender'
        return fn

    @staticmethod
    def remover(fn):
        """Tag the method as the collection remover.

        The remover method is called with one positional argument: the value
        to remove. The method will be automatically decorated with
        :meth:`removes_return` if not already decorated::

            @collection.remover
            def zap(self, entity): ...

            # or, equivalently
            @collection.remover
            @collection.removes_return()
            def zap(self, entity): ...

        If the value to remove is not present in the collection, you may
        raise an exception or return None to ignore the error.

        If the remove method is internally instrumented, you must also
        receive the keyword argument '_sa_initiator' and ensure its
        promulgation to collection events.

        """
        fn._sa_instrument_role = 'remover'
        return fn

    @staticmethod
    def iterator(fn):
        """Tag the method as the collection iterator.

        The iterator method is called with no arguments.  It is expected to
        return an iterator over all collection members::

            @collection.iterator
            def __iter__(self): ...

        """
        fn._sa_instrument_role = 'iterator'
        return fn

    @staticmethod
    def internally_instrumented(fn):
        """Tag the method as instrumented.

        This tag will prevent any decoration from being applied to the
        method. Use this if you are orchestrating your own calls to
        :func:`.collection_adapter` in one of the basic SQLAlchemy
        interface methods, or to prevent an automatic ABC method
        decoration from wrapping your implementation::

            # normally an 'extend' method on a list-like class would be
            # automatically intercepted and re-implemented in terms of
            # SQLAlchemy events and append().  your implementation will
            # never be called, unless:
            @collection.internally_instrumented
            def extend(self, items): ...

        """
        fn._sa_instrumented = True
        return fn

    @staticmethod
    def linker(fn):
        """Tag the method as a "linked to attribute" event handler.

        This optional event handler will be called when the collection class
        is linked to or unlinked from the InstrumentedAttribute.  It is
        invoked immediately after the '_sa_adapter' property is set on
        the instance.  A single argument is passed: the collection adapter
        that has been linked, or None if unlinking.

        .. deprecated:: 1.0.0 - the :meth:`.collection.linker` handler
           is superseded by the :meth:`.AttributeEvents.init_collection`
           and :meth:`.AttributeEvents.dispose_collection` handlers.

        """
        fn._sa_instrument_role = 'linker'
        return fn

    link = linker
    """deprecated; synonym for :meth:`.collection.linker`."""

    @staticmethod
    def converter(fn):
        """Tag the method as the collection converter.

        This optional method will be called when a collection is being
        replaced entirely, as in::

            myobj.acollection = [newvalue1, newvalue2]

        The converter method will receive the object being assigned and should
        return an iterable of values suitable for use by the ``appender``
        method.  A converter must not assign values or mutate the collection,
        its sole job is to adapt the value the user provides into an iterable
        of values for the ORM's use.

        The default converter implementation will use duck-typing to do the
        conversion.  A dict-like collection will be converted into an iterable
        of dictionary values, and other types will simply be iterated::

            @collection.converter
            def convert(self, other): ...

        If the duck-typing of the object does not match the type of this
        collection, a TypeError is raised.

        Supply an implementation of this method if you want to expand the
        range of possible types that can be assigned in bulk or perform
        validation on the values about to be assigned.

        """
        fn._sa_instrument_role = 'converter'
        return fn

    @staticmethod
    def adds(arg):
        """Mark the method as adding an entity to the collection.

        Adds "add to collection" handling to the method.  The decorator
        argument indicates which method argument holds the SQLAlchemy-relevant
        value.  Arguments can be specified positionally (i.e. integer) or by
        name::

            @collection.adds(1)
            def push(self, item): ...

            @collection.adds('entity')
            def do_stuff(self, thing, entity=None): ...

        """
        def decorator(fn):
            fn._sa_instrument_before = ('fire_append_event', arg)
            return fn
        return decorator

    @staticmethod
    def replaces(arg):
        """Mark the method as replacing an entity in the collection.

        Adds "add to collection" and "remove from collection" handling to
        the method.  The decorator argument indicates which method argument
        holds the SQLAlchemy-relevant value to be added, and return value, if
        any will be considered the value to remove.

        Arguments can be specified positionally (i.e. integer) or by name::

            @collection.replaces(2)
            def __setitem__(self, index, item): ...

        """
        def decorator(fn):
            fn._sa_instrument_before = ('fire_append_event', arg)
            fn._sa_instrument_after = 'fire_remove_event'
            return fn
        return decorator

    @staticmethod
    def removes(arg):
        """Mark the method as removing an entity in the collection.

        Adds "remove from collection" handling to the method.  The decorator
        argument indicates which method argument holds the SQLAlchemy-relevant
        value to be removed. Arguments can be specified positionally (i.e.
        integer) or by name::

            @collection.removes(1)
            def zap(self, item): ...

        For methods where the value to remove is not known at call-time, use
        collection.removes_return.

        """
        def decorator(fn):
            fn._sa_instrument_before = ('fire_remove_event', arg)
            return fn
        return decorator

    @staticmethod
    def removes_return():
        """Mark the method as removing an entity in the collection.

        Adds "remove from collection" handling to the method.  The return
        value of the method, if any, is considered the value to remove.  The
        method arguments are not inspected::

            @collection.removes_return()
            def pop(self): ...

        For methods where the value to remove is known at call-time, use
        collection.removes.

        """
        def decorator(fn):
            fn._sa_instrument_after = 'fire_remove_event'
            return fn
        return decorator
# Retrieves the CollectionAdapter bound to a collection instance via the
# '_sa_adapter' attribute (assigned in CollectionAdapter.__init__).
collection_adapter = operator.attrgetter('_sa_adapter')
"""Fetch the :class:`.CollectionAdapter` for a collection."""
class CollectionAdapter(object):
    """Bridges between the ORM and arbitrary Python collections.

    Proxies base-level collection operations (append, remove, iterate)
    to the underlying Python collection, and emits add/remove events for
    entities entering or leaving the collection.

    The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
    entity collections.

    """

    # __slots__: no per-instance __dict__; keeps adapters lightweight.
    __slots__ = (
        'attr', '_key', '_data', 'owner_state', '_converter', 'invalidated')

    def __init__(self, attr, owner_state, data):
        """Link this adapter between ``attr``/``owner_state`` and the
        collection instance ``data``."""
        self.attr = attr
        self._key = attr.key
        # weak reference: the adapter must not keep the collection alive
        self._data = weakref.ref(data)
        self.owner_state = owner_state
        # back-link read by collection_adapter() and instrumented methods
        data._sa_adapter = self
        self._converter = data._sa_converter
        # set True externally when this collection is discarded/replaced
        self.invalidated = False

    def _warn_invalidated(self):
        # emitted when events fire on a collection already replaced
        util.warn("This collection has been invalidated.")

    @property
    def data(self):
        "The entity collection being adapted."
        return self._data()

    @property
    def _referenced_by_owner(self):
        """return True if the owner state still refers to this collection.

        This will return False within a bulk replace operation,
        where this collection is the one being replaced.

        """
        return self.owner_state.dict[self._key] is self._data()

    def bulk_appender(self):
        # hand out the raw instrumented appender for repeated calls
        return self._data()._sa_appender

    def append_with_event(self, item, initiator=None):
        """Add an entity to the collection, firing mutation events."""
        self._data()._sa_appender(item, _sa_initiator=initiator)

    def append_without_event(self, item):
        """Add or restore an entity to the collection, firing no events."""
        # _sa_initiator=False suppresses event dispatch in the wrapper
        self._data()._sa_appender(item, _sa_initiator=False)

    def append_multiple_without_event(self, items):
        """Add or restore multiple entities to the collection, firing no
        events."""
        appender = self._data()._sa_appender
        for item in items:
            appender(item, _sa_initiator=False)

    def bulk_remover(self):
        # hand out the raw instrumented remover for repeated calls
        return self._data()._sa_remover

    def remove_with_event(self, item, initiator=None):
        """Remove an entity from the collection, firing mutation events."""
        self._data()._sa_remover(item, _sa_initiator=initiator)

    def remove_without_event(self, item):
        """Remove an entity from the collection, firing no events."""
        self._data()._sa_remover(item, _sa_initiator=False)

    def clear_with_event(self, initiator=None):
        """Empty the collection, firing a mutation event for each entity."""
        remover = self._data()._sa_remover
        # iterate a snapshot: the collection shrinks as items are removed
        for item in list(self):
            remover(item, _sa_initiator=initiator)

    def clear_without_event(self):
        """Empty the collection, firing no events."""
        remover = self._data()._sa_remover
        for item in list(self):
            remover(item, _sa_initiator=False)

    def __iter__(self):
        """Iterate over entities in the collection."""
        return iter(self._data()._sa_iterator())

    def __len__(self):
        """Count entities in the collection."""
        return len(list(self._data()._sa_iterator()))

    def __bool__(self):
        # an adapter is always truthy, even over an empty collection
        return True

    __nonzero__ = __bool__  # Python 2 spelling of __bool__

    def fire_append_event(self, item, initiator=None):
        """Notify that an entity has entered the collection.

        Initiator is a token owned by the InstrumentedAttribute that
        initiated the membership mutation, and should be left as None
        unless you are passing along an initiator value from a chained
        operation.

        """
        if initiator is not False:
            if self.invalidated:
                self._warn_invalidated()
            # the attribute event may replace the item (e.g. validators)
            return self.attr.fire_append_event(
                self.owner_state,
                self.owner_state.dict,
                item, initiator)
        else:
            # initiator is False: events suppressed, item passes through
            return item

    def fire_remove_event(self, item, initiator=None):
        """Notify that an entity has been removed from the collection.

        Initiator is the InstrumentedAttribute that initiated the membership
        mutation, and should be left as None unless you are passing along
        an initiator value from a chained operation.

        """
        if initiator is not False:
            if self.invalidated:
                self._warn_invalidated()
            self.attr.fire_remove_event(
                self.owner_state,
                self.owner_state.dict,
                item, initiator)

    def fire_pre_remove_event(self, initiator=None):
        """Notify that an entity is about to be removed from the collection.

        Only called if the entity cannot be removed after calling
        fire_remove_event().

        """
        if self.invalidated:
            self._warn_invalidated()
        self.attr.fire_pre_remove_event(
            self.owner_state,
            self.owner_state.dict,
            initiator=initiator)

    def __getstate__(self):
        # __slots__ classes pickle via explicit state; only the pieces
        # needed to re-link after unpickling are stored.
        return {'key': self._key,
                'owner_state': self.owner_state,
                'data': self.data}

    def __setstate__(self, d):
        # NOTE(review): 'attr', '_converter' and 'invalidated' are not
        # restored here, so an unpickled adapter is only partially
        # initialized -- confirm against callers before relying on them.
        self._key = d['key']
        self.owner_state = d['owner_state']
        self._data = weakref.ref(d['data'])
def bulk_replace(values, existing_adapter, new_adapter):
    """Load a new collection, firing events based on prior like membership.

    Appends instances in ``values`` onto the ``new_adapter``.  Events will be
    fired for any instance not present in the ``existing_adapter``.  Any
    instances in ``existing_adapter`` not present in ``values`` will have
    remove events fired upon them.

    :param values: An iterable of collection member instances

    :param existing_adapter: A :class:`.CollectionAdapter` of
     instances to be replaced

    :param new_adapter: An empty :class:`.CollectionAdapter`
     to load with ``values``

    """
    assert isinstance(values, list)

    # membership is decided by object identity, not equality
    existing_members = util.IdentitySet(existing_adapter or ())
    carried_over = existing_members.intersection(values or ())
    added = util.IdentitySet(values or ()).difference(carried_over)
    dropped = existing_members.difference(carried_over)

    appender = new_adapter.bulk_appender()
    for member in values or ():
        if member in added:
            # genuinely new member: fire an append event
            appender(member)
        elif member in carried_over:
            # already present before the replace: append silently
            appender(member, _sa_initiator=False)

    if existing_adapter:
        remover = existing_adapter.bulk_remover()
        for member in dropped:
            remover(member)
def prepare_instrumentation(factory):
    """Prepare a callable for future use as a collection class factory.

    Given a collection class factory (either a type or no-arg callable),
    return another factory that will produce compatible instances when
    called.

    This function is responsible for converting collection_class=list
    into the run-time behavior of collection_class=InstrumentedList.

    """
    # Convert a builtin to 'Instrumented*'
    if factory in __canned_instrumentation:
        factory = __canned_instrumentation[factory]

    # Create a specimen
    cls = type(factory())

    # Did factory callable return a builtin?
    if cls in __canned_instrumentation:
        # Wrap it so that it returns our 'Instrumented*'
        factory = __converting_factory(cls, factory)
        cls = factory()

    # Instrument the class if needed.  A blocking Lock.acquire() always
    # returns True, so the previous ``if mutex.acquire():`` guard was a
    # no-op; the ``with`` block expresses the same acquire/release pairing.
    with __instrumentation_mutex:
        if getattr(cls, '_sa_instrumented', None) != id(cls):
            _instrument_class(cls)

    return factory
def __converting_factory(specimen_cls, original_factory):
    """Return a wrapper around ``original_factory`` that converts its
    "canned" collection result (set, dict, list) into the matching
    Instrumented* type.

    """
    target_cls = __canned_instrumentation[specimen_cls]

    def wrapper():
        return target_cls(original_factory())

    # often flawed but better than nothing
    wrapper.__name__ = "%sWrapper" % original_factory.__name__
    wrapper.__doc__ = original_factory.__doc__

    return wrapper
def _instrument_class(cls):
    """Modify methods in a class and install instrumentation."""

    # In the normal call flow, a request for any of the 3 basic collection
    # types is transformed into one of our trivial subclasses
    # (e.g. InstrumentedList).  Catch anything else that sneaks in here...
    # '__builtin__' is the Python 2 builtins module name; also check the
    # Python 3 spelling so bare ``list``/``set``/``dict`` are rejected on
    # both interpreters.
    if cls.__module__ in ('__builtin__', 'builtins'):
        raise sa_exc.ArgumentError(
            "Can not instrument a built-in type. Use a "
            "subclass, even a trivial one.")

    roles, methods = _locate_roles_and_methods(cls)

    _setup_canned_roles(cls, roles, methods)

    _assert_required_roles(cls, roles, methods)

    _set_collection_attributes(cls, roles, methods)
def _locate_roles_and_methods(cls):
    """Search the MRO for _sa_instrument_role-decorated methods, collecting
    role assignments and per-method instrumentation requests.

    """
    roles = {}
    methods = {}

    for supercls in cls.__mro__:
        for name, method in vars(supercls).items():
            if not util.callable(method):
                continue

            # note role declarations; setdefault keeps the most-derived one
            role = getattr(method, '_sa_instrument_role', None)
            if role is not None:
                assert role in ('appender', 'remover', 'iterator',
                                'linker', 'converter')
                roles.setdefault(role, name)

            # transfer instrumentation requests from decorated function
            # to the combined queue
            before = getattr(method, '_sa_instrument_before', None)
            if before is not None:
                op, argument = before
                assert op in ('fire_append_event', 'fire_remove_event')
                before = op, argument
            after = getattr(method, '_sa_instrument_after', None)
            if after is not None:
                assert after in ('fire_append_event', 'fire_remove_event')

            if before:
                methods[name] = before + (after, )
            elif after:
                methods[name] = None, None, after

    return roles, methods
def _setup_canned_roles(cls, roles, methods):
    """Apply "canned" role defaults and ABC auto-decoration based on the
    known collection type (dict, set, list) that ``cls`` duck-types as.

    """
    collection_type = util.duck_type_collection(cls)
    if collection_type in __interfaces:
        canned_roles, decorators = __interfaces[collection_type]
        for role, name in canned_roles.items():
            # decorated declarations take precedence over canned defaults
            roles.setdefault(role, name)

        # apply ABC auto-decoration to methods that need it
        for method_name, decorator in decorators.items():
            fn = getattr(cls, method_name, None)
            if (fn and method_name not in methods and
                    not hasattr(fn, '_sa_instrumented')):
                setattr(cls, method_name, decorator(fn))
def _assert_required_roles(cls, roles, methods):
    """Ensure the appender, remover and iterator roles are all present,
    applying implicit instrumentation to appender/remover when needed.

    """
    if 'appender' not in roles or not hasattr(cls, roles['appender']):
        raise sa_exc.ArgumentError(
            "Type %s must elect an appender method to be "
            "a collection class" % cls.__name__)
    if (roles['appender'] not in methods and
            not hasattr(getattr(cls, roles['appender']), '_sa_instrumented')):
        # un-decorated appender: instrument implicitly as adds(1)
        methods[roles['appender']] = ('fire_append_event', 1, None)

    if 'remover' not in roles or not hasattr(cls, roles['remover']):
        raise sa_exc.ArgumentError(
            "Type %s must elect a remover method to be "
            "a collection class" % cls.__name__)
    if (roles['remover'] not in methods and
            not hasattr(getattr(cls, roles['remover']), '_sa_instrumented')):
        # un-decorated remover: instrument implicitly as removes(1)
        methods[roles['remover']] = ('fire_remove_event', 1, None)

    if 'iterator' not in roles or not hasattr(cls, roles['iterator']):
        raise sa_exc.ArgumentError(
            "Type %s must elect an iterator method to be "
            "a collection class" % cls.__name__)
def _set_collection_attributes(cls, roles, methods):
    """Install event instrumentation and role aliases onto ``cls``.

    ``methods`` maps method names to ``(before, argument, after)`` event
    specs consumed by ``_instrument_membership_mutator``; ``roles`` maps
    role names to elected method names, interned as ``_sa_<role>``
    attributes.
    """
    for name, spec in methods.items():
        before, argument, after = spec
        original = getattr(cls, name)
        setattr(cls, name,
                _instrument_membership_mutator(original, before,
                                               argument, after))
    # Intern the role map as direct _sa_* attribute aliases.
    for role, target in roles.items():
        setattr(cls, '_sa_%s' % role, getattr(cls, target))
    cls._sa_adapter = None
    if not hasattr(cls, '_sa_converter'):
        cls._sa_converter = None
    # id(cls) ties the instrumentation mark to this exact class object;
    # a subclass inheriting the attribute will not match its own id.
    cls._sa_instrumented = id(cls)
def _instrument_membership_mutator(method, before, argument, after):
    """Route method args and/or return value through the collection
    adapter.

    ``before``/``after`` are names of adapter event methods (the callers
    in this module pass 'fire_append_event' / 'fire_remove_event');
    ``argument`` identifies which parameter of the wrapped method carries
    the collection member, either by position (int) or by name (str).
    """
    # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
    if before:
        fn_args = list(util.flatten_iterator(inspect_getargspec(method)[0]))
        if isinstance(argument, int):
            # Positional spec: remember the index, plus the matching
            # keyword name when the signature is long enough to have one.
            pos_arg = argument
            named_arg = len(fn_args) > argument and fn_args[argument] or None
        else:
            # Named spec: resolve back to a position when possible.
            if argument in fn_args:
                pos_arg = fn_args.index(argument)
            else:
                pos_arg = None
            named_arg = argument
        del fn_args
    def wrapper(*args, **kw):
        if before:
            # Locate the member value among positional and keyword args.
            if pos_arg is None:
                if named_arg not in kw:
                    raise sa_exc.ArgumentError(
                        "Missing argument %s" % argument)
                value = kw[named_arg]
            else:
                if len(args) > pos_arg:
                    value = args[pos_arg]
                elif named_arg in kw:
                    value = kw[named_arg]
                else:
                    raise sa_exc.ArgumentError(
                        "Missing argument %s" % argument)
        # _sa_initiator=False explicitly suppresses event firing.
        initiator = kw.pop('_sa_initiator', None)
        if initiator is False:
            executor = None
        else:
            executor = args[0]._sa_adapter
        if before and executor:
            getattr(executor, before)(value, initiator)
        if not after or not executor:
            return method(*args, **kw)
        else:
            res = method(*args, **kw)
            if res is not None:
                # After-events fire on the wrapped method's return value.
                getattr(executor, after)(res, initiator)
            return res
    wrapper._sa_instrumented = True
    if hasattr(method, "_sa_instrument_role"):
        wrapper._sa_instrument_role = method._sa_instrument_role
    wrapper.__name__ = method.__name__
    wrapper.__doc__ = method.__doc__
    return wrapper
def __set(collection, item, _sa_initiator=None):
    """Fire the append event for ``item`` and return the (possibly
    adapter-substituted) value; may eventually be inlined into
    decorators."""
    if _sa_initiator is False:
        # Event firing explicitly suppressed by the caller.
        return item
    adapter = collection._sa_adapter
    if adapter:
        item = adapter.fire_append_event(item, _sa_initiator)
    return item
def __del(collection, item, _sa_initiator=None):
    """Fire the remove event for ``item``; may eventually be inlined
    into decorators."""
    if _sa_initiator is False:
        # Event firing explicitly suppressed by the caller.
        return
    adapter = collection._sa_adapter
    if adapter:
        adapter.fire_remove_event(item, _sa_initiator)
def __before_delete(collection, _sa_initiator=None):
    """Run 'commit existing value' handlers ahead of a removal."""
    adapter = collection._sa_adapter
    if adapter:
        adapter.fire_pre_remove_event(_sa_initiator)
def _list_decorators():
    """Tailored instrumentation wrappers for any list-like class.

    Returns a dict mapping method names to decorators.  Each decorator
    wraps the like-named list method so membership changes are routed
    through ``__set`` / ``__del`` / ``__before_delete`` and therefore
    fire the collection adapter's append/remove events.
    """
    def _tidy(fn):
        # Mark the wrapper as instrumented and borrow list's docstring.
        fn._sa_instrumented = True
        fn.__doc__ = getattr(list, fn.__name__).__doc__
    def append(fn):
        def append(self, item, _sa_initiator=None):
            # Fire the append event first; the adapter may replace item.
            item = __set(self, item, _sa_initiator)
            fn(self, item)
        _tidy(append)
        return append
    def remove(fn):
        def remove(self, value, _sa_initiator=None):
            __before_delete(self, _sa_initiator)
            # testlib.pragma exempt:__eq__
            fn(self, value)
            __del(self, value, _sa_initiator)
        _tidy(remove)
        return remove
    def insert(fn):
        def insert(self, index, value):
            value = __set(self, value)
            fn(self, index, value)
        _tidy(insert)
        return insert
    def __setitem__(fn):
        def __setitem__(self, index, value):
            if not isinstance(index, slice):
                existing = self[index]
                if existing is not None:
                    __del(self, existing)
                value = __set(self, value)
                fn(self, index, value)
            else:
                # slice assignment requires __delitem__, insert, __len__
                step = index.step or 1
                start = index.start or 0
                if start < 0:
                    start += len(self)
                if index.stop is not None:
                    stop = index.stop
                else:
                    stop = len(self)
                if stop < 0:
                    stop += len(self)
                if step == 1:
                    # Contiguous slice: delete the old range in place,
                    # then insert the new items at the same position.
                    for i in range(start, stop, step):
                        if len(self) > start:
                            del self[start]
                    for i, item in enumerate(value):
                        self.insert(i + start, item)
                else:
                    # Extended slice: sizes must match, per list semantics.
                    rng = list(range(start, stop, step))
                    if len(value) != len(rng):
                        raise ValueError(
                            "attempt to assign sequence of size %s to "
                            "extended slice of size %s" % (len(value),
                                                           len(rng)))
                    for i, item in zip(rng, value):
                        self.__setitem__(i, item)
        _tidy(__setitem__)
        return __setitem__
    def __delitem__(fn):
        def __delitem__(self, index):
            if not isinstance(index, slice):
                item = self[index]
                __del(self, item)
                fn(self, index)
            else:
                # slice deletion requires __getslice__ and a slice-groking
                # __getitem__ for stepped deletion
                # note: not breaking this into atomic dels
                for item in self[index]:
                    __del(self, item)
                fn(self, index)
        _tidy(__delitem__)
        return __delitem__
    if util.py2k:
        # Python 2 routes simple slices through __setslice__/__delslice__.
        def __setslice__(fn):
            def __setslice__(self, start, end, values):
                for value in self[start:end]:
                    __del(self, value)
                values = [__set(self, value) for value in values]
                fn(self, start, end, values)
            _tidy(__setslice__)
            return __setslice__
        def __delslice__(fn):
            def __delslice__(self, start, end):
                for value in self[start:end]:
                    __del(self, value)
                fn(self, start, end)
            _tidy(__delslice__)
            return __delslice__
    def extend(fn):
        def extend(self, iterable):
            # Delegate to append so each element fires its own event.
            for value in iterable:
                self.append(value)
        _tidy(extend)
        return extend
    def __iadd__(fn):
        def __iadd__(self, iterable):
            # list.__iadd__ takes any iterable and seems to let TypeError
            # raise as-is instead of returning NotImplemented
            for value in iterable:
                self.append(value)
            return self
        _tidy(__iadd__)
        return __iadd__
    def pop(fn):
        def pop(self, index=-1):
            __before_delete(self)
            item = fn(self, index)
            __del(self, item)
            return item
        _tidy(pop)
        return pop
    if not util.py2k:
        def clear(fn):
            # NOTE(review): the ``index`` parameter is unused and is not
            # part of list.clear()'s signature -- it looks copied from
            # pop(); harmless, but confirm before relying on it.
            def clear(self, index=-1):
                for item in self:
                    __del(self, item)
                fn(self)
            _tidy(clear)
            return clear
    # __imul__ : not wrapping this. all members of the collection are already
    # present, so no need to fire appends... wrapping it with an explicit
    # decorator is still possible, so events on *= can be had if they're
    # desired. hard to imagine a use case for __imul__, though.
    l = locals().copy()
    l.pop('_tidy')
    return l
def _dict_decorators():
    """Tailored instrumentation wrappers for any dict-like mapping class.

    Returns a dict mapping method names to decorators.  Each decorator
    wraps the like-named dict method so membership changes are routed
    through ``__set`` / ``__del`` / ``__before_delete`` and therefore
    fire the collection adapter's append/remove events.
    """
    def _tidy(fn):
        # Mark the wrapper as instrumented and borrow dict's docstring.
        fn._sa_instrumented = True
        fn.__doc__ = getattr(dict, fn.__name__).__doc__
    # Sentinel distinguishing "argument omitted" from an explicit None.
    Unspecified = util.symbol('Unspecified')
    def __setitem__(fn):
        def __setitem__(self, key, value, _sa_initiator=None):
            if key in self:
                # Replacing a key fires a remove for the displaced value.
                __del(self, self[key], _sa_initiator)
            value = __set(self, value, _sa_initiator)
            fn(self, key, value)
        _tidy(__setitem__)
        return __setitem__
    def __delitem__(fn):
        def __delitem__(self, key, _sa_initiator=None):
            if key in self:
                __del(self, self[key], _sa_initiator)
            fn(self, key)
        _tidy(__delitem__)
        return __delitem__
    def clear(fn):
        def clear(self):
            for key in self:
                __del(self, self[key])
            fn(self)
        _tidy(clear)
        return clear
    def pop(fn):
        def pop(self, key, default=Unspecified):
            if key in self:
                __del(self, self[key])
            if default is Unspecified:
                return fn(self, key)
            else:
                return fn(self, key, default)
        _tidy(pop)
        return pop
    def popitem(fn):
        def popitem(self):
            __before_delete(self)
            # item is a (key, value) pair; the value is the member removed.
            item = fn(self)
            __del(self, item[1])
            return item
        _tidy(popitem)
        return popitem
    def setdefault(fn):
        def setdefault(self, key, default=None):
            # Routes through __setitem__/__getitem__ rather than fn so the
            # instrumented item setter fires events for newly added keys.
            if key not in self:
                self.__setitem__(key, default)
                return default
            else:
                return self.__getitem__(key)
        _tidy(setdefault)
        return setdefault
    def update(fn):
        def update(self, __other=Unspecified, **kw):
            if __other is not Unspecified:
                if hasattr(__other, 'keys'):
                    # Mapping source: assign via self[key] so events fire,
                    # skipping pairs that are already identical.
                    for key in list(__other):
                        if (key not in self or
                                self[key] is not __other[key]):
                            self[key] = __other[key]
                else:
                    # Iterable-of-(key, value)-pairs source.
                    for key, value in __other:
                        if key not in self or self[key] is not value:
                            self[key] = value
            for key in kw:
                if key not in self or self[key] is not kw[key]:
                    self[key] = kw[key]
        _tidy(update)
        return update
    l = locals().copy()
    l.pop('_tidy')
    l.pop('Unspecified')
    return l
# Built-in set types accepted by the instrumented binary operators.
_set_binop_bases = (set, frozenset)
def _set_binops_check_strict(self, obj):
    """Allow only set, frozenset and self.__class__-derived
    objects in binops."""
    acceptable = _set_binop_bases + (self.__class__,)
    return isinstance(obj, acceptable)
def _set_binops_check_loose(self, obj):
    """Allow anything set-like to participate in set binops."""
    if isinstance(obj, _set_binop_bases + (self.__class__,)):
        return True
    # Fall back to duck-typing: anything that quacks like a set counts.
    return util.duck_type_collection(obj) == set
def _set_decorators():
    """Tailored instrumentation wrappers for any set-like class.

    Returns a dict mapping method names to decorators.  Each decorator
    wraps the like-named set method so membership changes are routed
    through ``__set`` / ``__del`` / ``__before_delete`` and therefore
    fire the collection adapter's append/remove events.
    """
    def _tidy(fn):
        # Mark the wrapper as instrumented and borrow set's docstring.
        fn._sa_instrumented = True
        fn.__doc__ = getattr(set, fn.__name__).__doc__
    # NOTE(review): Unspecified is defined (and popped below) but not
    # used by any of the set wrappers here.
    Unspecified = util.symbol('Unspecified')
    def add(fn):
        def add(self, value, _sa_initiator=None):
            # Only fire an append event for genuinely new members.
            if value not in self:
                value = __set(self, value, _sa_initiator)
            # testlib.pragma exempt:__hash__
            fn(self, value)
        _tidy(add)
        return add
    def discard(fn):
        def discard(self, value, _sa_initiator=None):
            # testlib.pragma exempt:__hash__
            if value in self:
                __del(self, value, _sa_initiator)
            # testlib.pragma exempt:__hash__
            fn(self, value)
        _tidy(discard)
        return discard
    def remove(fn):
        def remove(self, value, _sa_initiator=None):
            # testlib.pragma exempt:__hash__
            if value in self:
                __del(self, value, _sa_initiator)
            # testlib.pragma exempt:__hash__
            fn(self, value)
        _tidy(remove)
        return remove
    def pop(fn):
        def pop(self):
            __before_delete(self)
            item = fn(self)
            __del(self, item)
            return item
        _tidy(pop)
        return pop
    def clear(fn):
        def clear(self):
            # Iterate a copy and delegate to remove() so each element
            # fires its own remove event.
            for item in list(self):
                self.remove(item)
        _tidy(clear)
        return clear
    def update(fn):
        def update(self, value):
            # Delegate to add() so each new element fires an event.
            for item in value:
                self.add(item)
        _tidy(update)
        return update
    def __ior__(fn):
        def __ior__(self, value):
            # Reject non-set-like operands so Python can try the
            # reflected operation / raise TypeError as usual.
            if not _set_binops_check_strict(self, value):
                return NotImplemented
            for item in value:
                self.add(item)
            return self
        _tidy(__ior__)
        return __ior__
    def difference_update(fn):
        def difference_update(self, value):
            # Delegate to discard() so each removal fires an event.
            for item in value:
                self.discard(item)
        _tidy(difference_update)
        return difference_update
    def __isub__(fn):
        def __isub__(self, value):
            if not _set_binops_check_strict(self, value):
                return NotImplemented
            for item in value:
                self.discard(item)
            return self
        _tidy(__isub__)
        return __isub__
    def intersection_update(fn):
        def intersection_update(self, other):
            # Compute the membership delta, then apply it via the
            # instrumented remove()/add() so events fire per element.
            want, have = self.intersection(other), set(self)
            remove, add = have - want, want - have
            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
        _tidy(intersection_update)
        return intersection_update
    def __iand__(fn):
        def __iand__(self, other):
            if not _set_binops_check_strict(self, other):
                return NotImplemented
            want, have = self.intersection(other), set(self)
            remove, add = have - want, want - have
            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
            return self
        _tidy(__iand__)
        return __iand__
    def symmetric_difference_update(fn):
        def symmetric_difference_update(self, other):
            want, have = self.symmetric_difference(other), set(self)
            remove, add = have - want, want - have
            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
        _tidy(symmetric_difference_update)
        return symmetric_difference_update
    def __ixor__(fn):
        def __ixor__(self, other):
            if not _set_binops_check_strict(self, other):
                return NotImplemented
            want, have = self.symmetric_difference(other), set(self)
            remove, add = have - want, want - have
            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
            return self
        _tidy(__ixor__)
        return __ixor__
    l = locals().copy()
    l.pop('_tidy')
    l.pop('Unspecified')
    return l
class InstrumentedList(list):
    """An instrumented version of the built-in list.

    The class body is intentionally empty; event instrumentation is
    attached after definition via ``_instrument_class``.
    """
class InstrumentedSet(set):
    """An instrumented version of the built-in set.

    The class body is intentionally empty; event instrumentation is
    attached after definition via ``_instrument_class``.
    """
class InstrumentedDict(dict):
    """An instrumented version of the built-in dict.

    The class body is intentionally empty; instrumentation, where
    needed, is applied externally.
    """
# Map each built-in collection type to its pre-instrumented subclass.
__canned_instrumentation = {
    list: InstrumentedList,
    set: InstrumentedSet,
    dict: InstrumentedDict,
}
# Per duck-typed interface: (canned role elections, decorator table
# produced by the _list/_set/_dict_decorators factories above).
__interfaces = {
    list: (
        {'appender': 'append', 'remover': 'remove',
         'iterator': '__iter__'}, _list_decorators()
    ),
    set: ({'appender': 'add',
           'remover': 'remove',
           'iterator': '__iter__'}, _set_decorators()
          ),
    # decorators are required for dicts and object collections.
    # Python 3 iterates dict values via 'values'; Python 2 via 'itervalues'.
    dict: ({'iterator': 'values'}, _dict_decorators()) if util.py3k
    else ({'iterator': 'itervalues'}, _dict_decorators()),
}
class MappedCollection(dict):
    """A basic dictionary-based collection class.

    Extends dict with the minimal bag semantics that collection
    classes require. ``set`` and ``remove`` are implemented in terms
    of a keying function: any callable that takes an object and
    returns an object for use as a dictionary key.
    """
    def __init__(self, keyfunc):
        """Create a new collection with keying provided by keyfunc.

        keyfunc may be any callable that takes an object and returns an object
        for use as a dictionary key.

        The keyfunc will be called every time the ORM needs to add a member by
        value-only (such as when loading instances from the database) or
        remove a member. The usual cautions about dictionary keying apply-
        ``keyfunc(object)`` should return the same output for the life of the
        collection. Keying based on mutable properties can result in
        unreachable instances "lost" in the collection.
        """
        self.keyfunc = keyfunc
    @collection.appender
    @collection.internally_instrumented
    def set(self, value, _sa_initiator=None):
        """Add an item by value, consulting the keyfunc for the key."""
        key = self.keyfunc(value)
        # __setitem__ accepts _sa_initiator here, i.e. the instrumented
        # dict item setter fires the append event on our behalf.
        self.__setitem__(key, value, _sa_initiator)
    @collection.remover
    @collection.internally_instrumented
    def remove(self, value, _sa_initiator=None):
        """Remove an item by value, consulting the keyfunc for the key."""
        key = self.keyfunc(value)
        # Let self[key] raise if key is not in this collection
        # testlib.pragma exempt:__ne__
        if self[key] != value:
            raise sa_exc.InvalidRequestError(
                "Can not remove '%s': collection holds '%s' for key '%s'. "
                "Possible cause: is the MappedCollection key function "
                "based on mutable properties or properties that only obtain "
                "values after flush?" %
                (value, self[key], key))
        # As in set(), the instrumented __delitem__ fires the remove event.
        self.__delitem__(key, _sa_initiator)
    @collection.converter
    def _convert(self, dictlike):
        """Validate and convert a dict-like object into values for set()ing.

        This is called behind the scenes when a MappedCollection is replaced
        entirely by another collection, as in::

          myobj.mappedcollection = {'a':obj1, 'b': obj2} # ...

        Raises a TypeError if the key in any (key, value) pair in the dictlike
        object does not match the key that this collection's keyfunc would
        have assigned for that value.
        """
        # Generator: yields only the values; keys are re-derived by set().
        for incoming_key, value in util.dictlike_iteritems(dictlike):
            new_key = self.keyfunc(value)
            if incoming_key != new_key:
                raise TypeError(
                    "Found incompatible key %r for value %r; this "
                    "collection's "
                    "keying function requires a key of %r for this value." % (
                        incoming_key, value, new_key))
            yield value
# ensure instrumentation is associated with
# these built-in classes; if a user-defined class
# subclasses these and uses @internally_instrumented,
# the superclass is otherwise not instrumented.
# see [ticket:2406].
# (Runs at import time, after the decorator tables above are built.
# NOTE(review): InstrumentedDict is not explicitly instrumented here --
# presumably dict collections go through MappedCollection; confirm.)
_instrument_class(MappedCollection)
_instrument_class(InstrumentedList)
_instrument_class(InstrumentedSet)
| {
"content_hash": "37b11888df9300351a004602f6dd4665",
"timestamp": "",
"source": "github",
"line_count": 1539,
"max_line_length": 78,
"avg_line_length": 33.708252111760885,
"alnum_prop": 0.6019816103475528,
"repo_name": "ThiefMaster/sqlalchemy",
"id": "58a69227c1d831dea728476837fd67fd6e1e1c8e",
"size": "52116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/orm/collections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46062"
},
{
"name": "Python",
"bytes": "8929926"
}
],
"symlink_target": ""
} |
from . import molecule_util
from . import rna_seq_util
from . import taxonomy_util
from . import warning_util
from . import mongo_util
from . import file_util
from . import chem_util
| {
"content_hash": "7a05f8f454645d6bdbe57b47a30a8e20",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 27,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.7650273224043715,
"repo_name": "KarrLab/kinetic_datanator",
"id": "00dc351aee1a2082b3963ad721e1cae8eb2af576",
"size": "183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datanator/util/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1217"
},
{
"name": "Dockerfile",
"bytes": "171"
},
{
"name": "HTML",
"bytes": "50579"
},
{
"name": "Python",
"bytes": "980025"
}
],
"symlink_target": ""
} |
"""The main model training loop."""
import functools
import os
import time
from typing import Callable, Dict, Iterable, Mapping, Optional, Tuple, Type, Union
from absl import logging
from clu import checkpoint
from clu import metric_writers
from clu import metrics
from clu import parameter_overview
from clu import periodic_actions
import flax
from flax import linen as nn
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import optax
from savi.lib import evaluator
from savi.lib import input_pipeline
from savi.lib import losses
from savi.lib import utils
import tensorflow as tf
# Type aliases used throughout this trainer.
Array = jnp.ndarray
ArrayTree = Union[Array, Iterable["ArrayTree"], Mapping[str, "ArrayTree"]]  # pytype: disable=not-supported-yet
PRNGKey = Array  # a JAX PRNG key is itself an array, not a Python int seed
def train_step(
    model: nn.Module,
    rng: PRNGKey,
    step: int,
    state_vars: flax.core.FrozenDict,
    opt: flax.optim.Optimizer,  # pytype: disable=module-attr
    batch: Dict[str, ArrayTree],
    loss_fn: losses.LossFn,
    learning_rate_fn: Callable[[Array], Array],
    train_metrics_cls: Type[metrics.Collection],
    predicted_max_num_instances: int,
    ground_truth_max_num_instances: int,
    conditioning_key: Optional[str] = None,
    max_grad_norm: Optional[float] = None,
) -> Tuple[flax.optim.Optimizer, flax.core.FrozenDict, PRNGKey,  # pytype: disable=module-attr
           metrics.Collection, int]:
  """Perform a single training step.

  Args:
    model: Model used in training step.
    rng: Random number key
    step: Which training step we are on.
    state_vars: Accessory variables.
    opt: The optimizer to use to minimize loss_fn.
    batch: Training inputs for this step.
    loss_fn: Loss function that takes model predictions and a batch of data.
    learning_rate_fn: Function that outputs learning rate as jnp.float32 given
      step as jnp.int*.
    train_metrics_cls: The metrics collection for computing training metrics.
    predicted_max_num_instances: Maximum number of instances in prediction.
    ground_truth_max_num_instances: Maximum number of instances in ground truth,
      including background (which counts as a separate instance).
    conditioning_key: Optional string. If provided, defines the batch key to be
      used as conditioning signal for the model. Otherwise this is inferred from
      the available keys in the batch.
    max_grad_norm: Optional float, if not None, clip gradients to the specified
      maximum norm.

  Returns:
    Tuple of the updated opt, state_vars, new random number key,
    metrics update, and step + 1. Note that some of this info is stored in
    TrainState, but here it is unpacked.
  """
  # Split PRNGKey and bind to host / device.
  new_rng, rng = jax.random.split(rng)
  rng = jax.random.fold_in(rng, jax.host_id())
  # axis_index only exists inside a pmap; this function is expected to
  # run under jax.pmap with axis_name="batch" (see the caller).
  rng = jax.random.fold_in(rng, jax.lax.axis_index("batch"))
  init_rng, dropout_rng = jax.random.split(rng, 2)
  # Let apply() write all existing state collections plus "intermediates";
  # intermediates are filtered out again before returning.
  mutable_var_keys = list(state_vars.keys()) + ["intermediates"]
  conditioning = batch[conditioning_key] if conditioning_key else None
  def train_loss_fn(params, state_vars):
    # Returns (scalar loss, aux) so it can be differentiated with
    # jax.value_and_grad(..., has_aux=True).
    preds, mutable_vars = model.apply(
        {"params": params, **state_vars}, video=batch["video"],
        conditioning=conditioning, mutable=mutable_var_keys,
        rngs={"state_init": init_rng, "dropout": dropout_rng}, train=True,
        padding_mask=batch.get("padding_mask"))
    # Filter intermediates, as we do not want to store them in the TrainState.
    state_vars = utils.filter_key_from_frozen_dict(
        mutable_vars, key="intermediates")
    loss, loss_aux = loss_fn(preds, batch)
    return loss, (state_vars, preds, loss_aux)
  grad_fn = jax.value_and_grad(train_loss_fn, has_aux=True)
  (loss, (state_vars, preds, loss_aux)), grad = grad_fn(opt.target, state_vars)
  # Compute average gradient across multiple workers.
  grad = jax.lax.pmean(grad, axis_name="batch")
  if max_grad_norm is not None:
    grad = utils.clip_grads(grad, max_grad_norm)
  # Subtracting 1 from step as we start from initial step 1 instead of 0.
  learning_rate = learning_rate_fn(step - 1)
  opt = opt.apply_gradient(grad, learning_rate=learning_rate)
  # Compute metrics.
  metrics_update = train_metrics_cls.gather_from_model_output(
      loss=loss,
      **loss_aux,
      predicted_segmentations=utils.remove_singleton_dim(
          preds["outputs"].get("segmentations")),  # pytype: disable=attribute-error
      ground_truth_segmentations=batch.get("segmentations"),
      predicted_max_num_instances=predicted_max_num_instances,
      ground_truth_max_num_instances=ground_truth_max_num_instances,
      padding_mask=batch.get("padding_mask"),
      mask=batch.get("mask"))
  return opt, state_vars, new_rng, metrics_update, step + 1
def train_and_evaluate(config: ml_collections.ConfigDict,
                       workdir: str):
  """Runs a training and evaluation loop.

  Args:
    config: Configuration to use.
    workdir: Working directory for checkpoints and TF summaries. If this
      contains checkpoint training will be resumed from the latest checkpoint.
  """
  rng = jax.random.PRNGKey(config.seed)
  tf.io.gfile.makedirs(workdir)
  # Input pipeline.
  rng, data_rng = jax.random.split(rng)
  # Make sure each host uses a different RNG for the training data.
  if config.get("seed_data", True):  # Default to seeding data if not specified.
    data_rng = jax.random.fold_in(data_rng, jax.host_id())
  else:
    data_rng = None
  train_ds, eval_ds = input_pipeline.create_datasets(config, data_rng)
  train_iter = iter(train_ds)  # pytype: disable=wrong-arg-types
  # Initialize model
  model = utils.build_model_from_config(config.model)
  learning_rate_fn = optax.warmup_cosine_decay_schedule(
      init_value=0.,
      peak_value=config.learning_rate,
      warmup_steps=config.warmup_steps,
      decay_steps=config.num_train_steps)
  # flax.optim is the legacy optimizer API (hence the pytype disable);
  # the rate given here is superseded per-step by learning_rate_fn inside
  # train_step's apply_gradient call.
  optimizer_def = flax.optim.Adam(learning_rate=config.learning_rate)  # pytype: disable=module-attr
  # Construct TrainMetrics and EvalMetrics, metrics collections.
  train_metrics_cls = utils.make_metrics_collection("TrainMetrics",
                                                    config.train_metrics_spec)
  eval_metrics_cls = utils.make_metrics_collection("EvalMetrics",
                                                   config.eval_metrics_spec)
  def init_model(rng):
    # Builds dummy inputs from the dataset element spec (dropping the
    # leading shard/batch dims) and initializes model variables.
    rng, init_rng, model_rng, dropout_rng = jax.random.split(rng, num=4)
    init_conditioning = None
    if config.get("conditioning_key"):
      init_conditioning = jnp.ones(
          [1] + list(train_ds.element_spec[config.conditioning_key].shape)[2:],
          jnp.int32)
    init_inputs = jnp.ones(
        [1] + list(train_ds.element_spec["video"].shape)[2:],
        jnp.float32)
    initial_vars = model.init(
        {"params": model_rng, "state_init": init_rng, "dropout": dropout_rng},
        video=init_inputs, conditioning=init_conditioning,
        padding_mask=jnp.ones(init_inputs.shape[:-1], jnp.int32))
    # Split into state variables (e.g. for batchnorm stats) and model params.
    # Note that `pop()` on a FrozenDict performs a deep copy.
    state_vars, initial_params = initial_vars.pop("params")  # pytype: disable=attribute-error
    # Filter out intermediates (we don't want to store these in the TrainState.)
    state_vars = utils.filter_key_from_frozen_dict(
        state_vars, key="intermediates")
    return state_vars, initial_params
  state_vars, initial_params = init_model(rng)
  parameter_overview.log_parameter_overview(initial_params)  # pytype: disable=wrong-arg-types
  optimizer = optimizer_def.create(initial_params)
  state = utils.TrainState(
      step=1, optimizer=optimizer, rng=rng, variables=state_vars)
  loss_fn = functools.partial(
      losses.compute_full_loss, loss_config=config.losses)
  checkpoint_dir = os.path.join(workdir, "checkpoints")
  ckpt = checkpoint.MultihostCheckpoint(checkpoint_dir)
  # Resume from the latest checkpoint in workdir if one exists.
  state = ckpt.restore_or_initialize(state)
  initial_step = int(state.step)
  # Replicate our parameters.
  state = flax.jax_utils.replicate(state, devices=jax.local_devices())
  del rng  # rng is stored in the state.
  # Only write metrics on host 0, write to logs on all other hosts.
  writer = metric_writers.create_default_writer(
      workdir, just_logging=jax.host_id() > 0)
  writer.write_hparams(utils.prepare_dict_for_logging(config.to_dict()))
  logging.info("Starting training loop at step %d.", initial_step)
  report_progress = periodic_actions.ReportProgress(
      num_train_steps=config.num_train_steps, writer=writer)
  if jax.process_index() == 0:
    profiler = periodic_actions.Profile(num_profile_steps=5, logdir=workdir)
  # Static argnums (0, 6-12) cover the model, functions, metrics class and
  # scalar config -- constant across steps; donated argnums (1-5) let XLA
  # reuse the buffers of rng/step/state_vars/opt/batch.
  p_train_step = jax.pmap(
      train_step,
      axis_name="batch",
      donate_argnums=(1, 2, 3, 4, 5),
      static_broadcasted_argnums=(0, 6, 7, 8, 9, 10, 11, 12))
  train_metrics = None
  with metric_writers.ensure_flushes(writer):
    # num_train_steps == 0 means "evaluate and checkpoint the initial
    # state only", then return without training.
    if config.num_train_steps == 0:
      with report_progress.timed("eval"):
        evaluate(model, state, eval_ds, loss_fn, eval_metrics_cls, config,
                 writer, step=0)
      with report_progress.timed("checkpoint"):
        ckpt.save(flax.jax_utils.unreplicate(state))
      return
    for step in range(initial_step, config.num_train_steps + 1):
      # `step` is a Python integer. `state.step` is JAX integer on GPU/TPU.
      is_last_step = step == config.num_train_steps
      with jax.profiler.StepTraceAnnotation("train", step_num=step):
        batch = jax.tree_map(np.asarray, next(train_iter))
        opt, state_vars, rng, metrics_update, p_step = p_train_step(
            model, state.rng, state.step, state.variables,
            state.optimizer, batch, loss_fn, learning_rate_fn,
            train_metrics_cls,
            config.num_slots,
            config.max_instances + 1,  # Incl. background.
            config.get("conditioning_key"),
            config.get("max_grad_norm"))
        state = state.replace(  # pytype: disable=attribute-error
            optimizer=opt,
            step=p_step,
            variables=state_vars,
            rng=rng,
        )
      # Accumulate per-step metric updates until the next scalar write.
      metric_update = flax.jax_utils.unreplicate(metrics_update)
      train_metrics = (
          metric_update
          if train_metrics is None else train_metrics.merge(metric_update))
      # Quick indication that training is happening.
      logging.log_first_n(logging.INFO, "Finished training step %d.", 5, step)
      report_progress(step, time.time())
      if jax.process_index() == 0:
        profiler(step)
      if step % config.log_loss_every_steps == 0 or is_last_step:
        metrics_res = train_metrics.compute()
        writer.write_scalars(step, jax.tree_map(np.array, metrics_res))
        train_metrics = None
      if step % config.eval_every_steps == 0 or is_last_step:
        with report_progress.timed("eval"):
          evaluate(model, state, eval_ds, loss_fn, eval_metrics_cls,
                   config, writer, step=step)
      if step % config.checkpoint_every_steps == 0 or is_last_step:
        with report_progress.timed("checkpoint"):
          ckpt.save(flax.jax_utils.unreplicate(state))
def evaluate(model, state, eval_ds, loss_fn_eval, eval_metrics_cls, config,
             writer, step: int):
  """Evaluate the model and log scalar metrics and example images.

  Args:
    model: The model to evaluate.
    state: TrainState holding parameters and variables (replicated by
      the caller).
    eval_ds: Evaluation dataset.
    loss_fn_eval: Loss function used during evaluation.
    eval_metrics_cls: Metrics collection class for evaluation.
    config: Experiment configuration.
    writer: Metric writer for scalars and images.
    step: Global training step under which results are logged.
  """
  eval_metrics, eval_batch, eval_preds = evaluator.evaluate(
      model,
      state,
      eval_ds,
      loss_fn_eval,
      eval_metrics_cls,
      predicted_max_num_instances=config.num_slots,
      ground_truth_max_num_instances=config.max_instances + 1,  # Incl. bg.
      slice_size=config.get("eval_slice_size"),
      slice_keys=config.get("eval_slice_keys"),
      conditioning_key=config.get("conditioning_key"),
      remove_from_predictions=config.get("remove_from_predictions"),
      metrics_on_cpu=config.get("metrics_on_cpu", False))
  metrics_res = eval_metrics.compute()
  # Flatten the nested metrics tree into named scalars for logging.
  writer.write_scalars(
      step, jax.tree_map(np.array, utils.flatten_named_dicttree(metrics_res)))
  # Log a small panel of qualitative examples alongside the scalars.
  writer.write_images(
      step,
      jax.tree_map(
          np.array,
          utils.prepare_images_for_logging(
              config,
              eval_batch,
              eval_preds,
              n_samples=config.get("n_samples", 5),
              n_frames=config.get("n_frames", 5),
              min_n_colors=config.get("logging_min_n_colors", 1))))
| {
"content_hash": "ee861c3399dbcfce5dc5ed2712d03088",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 111,
"avg_line_length": 39.54807692307692,
"alnum_prop": 0.6729880865548261,
"repo_name": "google-research/slot-attention-video",
"id": "c31e21b789fa8bcdd6343fc474fd6e15ae584585",
"size": "12915",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "savi/lib/trainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "198864"
},
{
"name": "Shell",
"bytes": "783"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url, patterns
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from haystack.views import search_view_factory
from .views import DocumentView, Search, LanguageRedirectView
# Haystack search view bound to the project's Search view class.
search_view = search_view_factory(Search)

# Non-translated URL patterns.
#
# BUG FIX: ``patterns()`` takes a view-prefix string as its first
# positional argument; previously the first ``url()`` entry was being
# consumed as that prefix, corrupting the URLconf. An empty prefix is
# now supplied, and the bare 2-tuples are normalized to explicit
# ``url()`` calls (equivalent behavior, but the documented form given
# that ``patterns()`` is deprecated as of Django 1.8).
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^search/', search_view, name='search'),
    url(r'^$', LanguageRedirectView.as_view()),
    url(r'^i18n/', include('django.conf.urls.i18n')),
)

# Language-prefixed patterns (e.g. /en/, /de/).
urlpatterns += i18n_patterns(
    url(r'^$', DocumentView.as_view(), name='home'),
)
| {
"content_hash": "81f75e74110522017af746b6b712664d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.7174280879864636,
"repo_name": "aaronhelton/django-multilingual-search",
"id": "bd357b6867128568bf63c5b47676d9037a7c91fe",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testproject/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4313"
},
{
"name": "Python",
"bytes": "53674"
},
{
"name": "Shell",
"bytes": "8126"
}
],
"symlink_target": ""
} |
import rospy
from std_msgs.msg import String


def handle_hand_off(message):
    """Log the payload of each String message received on 'HandOff'."""
    rospy.loginfo("%s", message.data)


rospy.init_node('PlayMusicRobot')
subscriber = rospy.Subscriber('HandOff', String, handle_hand_off)
rospy.spin()
| {
"content_hash": "33c079ebf0a5731efc938ab8f4003037",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 49,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7549019607843137,
"repo_name": "shuheikawai/robosys2015",
"id": "d8c3e271e00193b13cb0e78fc8e5fe83aadf9454",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/PlayMusicRobot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4362"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
import sys
# Add the PyRsw tools to the path
# At the moment it is given explicitely.
# In the future, it could also be added to the
# pythonpath environment variable
sys.path.append('../src')
import Steppers as Step
import Fluxes as Flux
from PyRsw import Simulation
from constants import minute, hour, day
# Build and configure the simulation.
sim = Simulation()  # Create a simulation object
# NOTE(review): run name says "1D" but this configuration is 2D
# (Nx = Ny = 128) -- confirm the intended name.
sim.run_name = '1D GeoAdjust'
# Geometry and Model Equations
sim.geomy = 'periodic'  # Geometry Types: 'periodic' or 'walls'
sim.stepper = Step.AB3  # Time-stepping algorithm: Euler, AB2, RK4
sim.dynamics = 'Linear'  # Dynamics: 'Nonlinear' or 'Linear'
sim.method = 'Sadourny'  # Numerical method: 'Sadourny'
sim.flux_method = Flux.sadourny_sw  # Flux method:
# Specify paramters
sim.Lx = 4000e3  # Domain extent (m)
sim.Ly = 4000e3  # Domain extent (m)
sim.Nx = 128  # Grid points in x
sim.Ny = 128  # Grid points in y
sim.Nz = 1  # Number of layers
sim.g = 9.81  # Gravity (m/sec^2)
sim.f0 = 0.e-4  # Coriolis (1/sec)
sim.beta = 0e-10  # Coriolis beta parameter (1/m/sec)
sim.Hs = [100.]  # Vector of mean layer depths (m)
sim.rho = [1025.]  # Vector of layer densities (kg/m^3)
sim.end_time = 1.0*24.*hour  # End Time (sec)
# Parallel: Only applies to the FFTWs
sim.num_threads = 4
# Plotting parameters
sim.plott = 15.*minute  # Period of plots
sim.animate = 'Anim'  # 'Save' to create video frames,
                      # 'Anim' to animate,
                      # 'None' otherwise
sim.plot_vars = ['u','v','h']  # Specify which variables to plot
# Specify manual ylimits if desired
# An empty list uses default limits
sim.ylims=[[-0.18,0.18],[-0.18,0.18],[-0.5,1.0]]
# Output parameters
sim.output = False  # True or False
sim.savet = 1.*hour  # Time between saves
# Diagnostics parameters
sim.diagt = 2.*minute  # Time for output
sim.diagnose = False  # True or False
# Initialize the grid and zero solutions
sim.initialize()
for ii in range(sim.Nz):  # Set mean depths
    sim.soln.h[:,:,ii] = sim.Hs[ii]
# Gaussian initial conditions
# NOTE(review): x0 is computed but never used -- the perturbation below
# varies only in y (grid_y). Confirm whether x-dependence was intended.
x0 = 1.*sim.Lx/2.  # Centre
W = 200.e3  # Width
amp = 1.  # Amplitude
sim.soln.h[:,:,0] += amp*np.exp(-(sim.grid_y.h)**2/(W**2))
# Run the simulation
sim.run()
# Hovmuller plot
plt.figure()
t = np.arange(0,sim.end_time+sim.plott,sim.plott)/86400.
# NOTE(review): with Nx = Ny = 128 neither branch below runs, leaving x
# undefined; x is only referenced by the commented-out plotting code, so
# this is harmless here, but the logic looks inherited from a 1D example.
if sim.Ny==1:
    x = sim.x/1e3
elif sim.Nx == 1:
    x = sim.y/1e3
#for L in range(sim.Nz):
#    field = sim.hov_h[:,0,:].T - np.sum(sim.Hs[L:])
#    cv = np.max(np.abs(field.ravel()))
#    plt.subplot(sim.Nz,1,L+1)
#    plt.pcolormesh(x,t, field,
#                   cmap=sim.cmap, vmin = -cv, vmax = cv)
#    plt.axis('tight')
#    plt.title(r"$\mathrm{Hovm{\"o}ller} \; \mathrm{Plot} \; \mathrm{of} \; \eta$", fontsize = 16)
#    if sim.Nx > 1:
#        plt.xlabel(r"$\mathrm{x} \; \mathrm{(km)}$", fontsize=14)
#    else:
#        plt.xlabel(r"$\mathrm{y} \; \mathrm{(km)}$", fontsize=14)
#    plt.ylabel(r"$\mathrm{Time} \; \mathrm{(days)}$", fontsize=14)
#    plt.colorbar()
#plt.show()
| {
"content_hash": "ae7f15059e6fb4f1ca75e79f08d6770d",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 98,
"avg_line_length": 33.59405940594059,
"alnum_prop": 0.5688181550250516,
"repo_name": "PyRsw/PyRsw",
"id": "5a19fb3eb691ccfdd6b6c8437d8abe102563d70b",
"size": "3393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/example_2D_sadourny.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "FORTRAN",
"bytes": "7842"
},
{
"name": "Makefile",
"bytes": "199"
},
{
"name": "Python",
"bytes": "91604"
}
],
"symlink_target": ""
} |
"""rankmylibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# Root URLconf: only the Django admin is wired up so far.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
]
| {
"content_hash": "f1ce7c0f71f56189d0219c0aa8504699",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.7011795543905636,
"repo_name": "NestarZ/rank-my-library",
"id": "27406a764b61fca82cd4a52498731ecd963d3ffa",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/rankmylibrary/rankmylibrary/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13061"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SizeminValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``scatter.marker.sizemin`` property."""

    def __init__(self, plotly_name="sizemin", parent_name="scatter.marker", **kwargs):
        # Same defaults as before; caller-supplied kwargs win.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("min", 0)
        super(SizeminValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "32b273fad4966c907c6820d351443e64",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 86,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.6085972850678733,
"repo_name": "plotly/plotly.py",
"id": "a03781f33a48f6ecc03834ba5b565c1a0d26cc50",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatter/marker/_sizemin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import stripe
import datetime
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth import logout as logout_user
from django.contrib.auth import login as login_user
from django.db.models.aggregates import Sum
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.contrib.admin.views.decorators import staff_member_required
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.core.mail import mail_admins
from django.conf import settings
from apps.profile.models import Profile, PaymentHistory, RNewUserQueue, MRedeemedCode, MGiftCode
from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory
from apps.profile.forms import StripePlusPaymentForm, PLANS, DeleteAccountForm
from apps.profile.forms import ForgotPasswordForm, ForgotPasswordReturnForm, AccountSettingsForm
from apps.profile.forms import RedeemCodeForm
from apps.reader.forms import SignupForm, LoginForm
from apps.rss_feeds.models import MStarredStory, MStarredStoryCounts
from apps.social.models import MSocialServices, MActivity, MSocialProfile
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
from utils import json_functions as json
from utils.user_functions import ajax_login_required
from utils.view_functions import render_to
from utils.user_functions import get_user
from utils import log as logging
from vendor.paypalapi.exceptions import PayPalAPIResponseError
from vendor.paypal.standard.forms import PayPalPaymentsForm
# Preference names stored as real columns on the Profile model rather than
# inside the JSON-encoded `profile.preferences` blob.
SINGLE_FIELD_PREFS = ('timezone','feed_pane_size','hide_mobile','send_emails',
                      'hide_getting_started', 'has_setup_feeds', 'has_found_friends',
                      'has_trained_intelligence',)
# Preference names that need custom handling in set_preference().
SPECIAL_PREFERENCES = ('old_password', 'new_password', 'autofollow_friends', 'dashboard_date',)
@ajax_login_required
@require_POST
@json.json_view
def set_preference(request):
    """Persist one or more user preferences posted as form fields.

    Single-field prefs are written straight onto the Profile model,
    special prefs get custom handling, and everything else is merged
    into the JSON-encoded ``profile.preferences`` blob.
    """
    code = 1
    message = ''
    new_preferences = request.POST
    # profile.preferences is a JSON-encoded dict of free-form prefs.
    preferences = json.decode(request.user.profile.preferences)
    for preference_name, preference_value in new_preferences.items():
        # Form values arrive as strings; coerce 'true'/'false' to bool.
        if preference_value in ['true','false']: preference_value = True if preference_value == 'true' else False
        if preference_name in SINGLE_FIELD_PREFS:
            # Stored as a real column on Profile.
            setattr(request.user.profile, preference_name, preference_value)
        elif preference_name in SPECIAL_PREFERENCES:
            if preference_name == 'autofollow_friends':
                social_services = MSocialServices.get_user(request.user.pk)
                social_services.autofollow = preference_value
                social_services.save()
            elif preference_name == 'dashboard_date':
                # The client only signals a reset; the server stamps "now".
                request.user.profile.dashboard_date = datetime.datetime.utcnow()
        else:
            # NOTE(review): the bool coercion already ran above, so this
            # re-check appears to be dead code -- presumably defensive.
            if preference_value in ["true", "false"]:
                preference_value = True if preference_value == "true" else False
            preferences[preference_name] = preference_value
            if preference_name == 'intro_page':
                logging.user(request, "~FBAdvancing intro to page ~FM~SB%s" % preference_value)
    request.user.profile.preferences = json.encode(preferences)
    request.user.profile.save()
    logging.user(request, "~FMSaving preference: %s" % new_preferences)
    response = dict(code=code, message=message, new_preferences=new_preferences)
    return response
@ajax_login_required
@json.json_view
def get_preference(request):
    """Return one preference value, or the whole prefs dict if no name given."""
    prefs = json.decode(request.user.profile.preferences)
    name = request.POST.get('preference')
    payload = prefs.get(name) if name else prefs
    return dict(code=1, payload=payload)
@csrf_protect
def login(request):
    """OAuth login page; on success redirect to `next` (or the index)."""
    if request.method == "POST":
        form = LoginForm(data=request.POST)
        if form.is_valid():
            user = form.get_user()
            login_user(request, user)
            logging.user(user, "~FG~BBOAuth Login~FW")
            return HttpResponseRedirect(request.POST['next'] or reverse('index'))
    else:
        form = LoginForm()
    # GET, or POST with errors: re-render with the (possibly bound) form.
    return render_to_response('accounts/login.html', {
        'form': form,
        'next': request.REQUEST.get('next', "")
    }, context_instance=RequestContext(request))
@csrf_protect
def signup(request):
    """Account creation page; activates the free tier and logs the user in."""
    if request.method == "POST":
        form = SignupForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            login_user(request, new_user)
            logging.user(new_user, "~FG~SB~BBNEW SIGNUP: ~FW%s" % new_user.email)
            new_user.profile.activate_free()
            return HttpResponseRedirect(request.POST['next'] or reverse('index'))
    else:
        form = SignupForm()
    # GET, or POST with errors: re-render with the (possibly bound) form.
    return render_to_response('accounts/signup.html', {
        'form': form,
        'next': request.REQUEST.get('next', "")
    }, context_instance=RequestContext(request))
@login_required
@csrf_protect
def redeem_code(request):
    """Redeem a gift code for the logged-in user."""
    if request.method == "POST":
        form = RedeemCodeForm(data=request.POST)
        if form.is_valid():
            MRedeemedCode.redeem(user=request.user,
                                 gift_code=request.POST['gift_code'])
            return render_to_response('reader/paypal_return.xhtml',
                                      {}, context_instance=RequestContext(request))
    else:
        # Pre-fill the form from a ?code= query parameter, if any.
        form = RedeemCodeForm(initial={'gift_code': request.GET.get('code', None)})
    return render_to_response('accounts/redeem_code.html', {
        'form': form,
        'code': request.REQUEST.get('code', ""),
        'next': request.REQUEST.get('next', "")
    }, context_instance=RequestContext(request))
@ajax_login_required
@require_POST
@json.json_view
def set_account_settings(request):
    """Save username/email/password settings; returns the first form error on failure."""
    form = AccountSettingsForm(user=request.user, data=request.POST)
    if form.is_valid():
        form.save()
        code, message = 1, 'OK'
    else:
        # Report only the first error of the first errored field (Py2 .keys()).
        first_field = form.errors.keys()[0]
        code, message = -1, form.errors[first_field][0]
    return dict(code=code, message=message, payload={
        "username": request.user.username,
        "email": request.user.email,
        "social_profile": MSocialProfile.profile(request.user.pk)
    })
@ajax_login_required
@require_POST
@json.json_view
def set_view_setting(request):
    """Store per-feed view/order/read-filter/layout settings on the profile."""
    feed_id = request.POST['feed_id']
    view = request.POST.get('feed_view_setting')
    order = request.POST.get('feed_order_setting')
    read_filter = request.POST.get('feed_read_filter_setting')
    layout = request.POST.get('feed_layout_setting')
    view_settings = json.decode(request.user.profile.view_settings)
    setting = view_settings.get(feed_id, {})
    # Legacy format stored a bare string for the view setting.
    if isinstance(setting, basestring):
        setting = {'v': setting}
    # Only overwrite the single-letter keys that were actually posted.
    for key, value in (('v', view), ('o', order), ('r', read_filter), ('l', layout)):
        if value:
            setting[key] = value
    view_settings[feed_id] = setting
    request.user.profile.view_settings = json.encode(view_settings)
    request.user.profile.save()
    logging.user(request, "~FMView settings: %s/%s/%s/%s" % (view,
                 order, read_filter, layout))
    return dict(code=1)
@ajax_login_required
@require_POST
@json.json_view
def clear_view_setting(request):
    """Strip one setting type ('layout' or 'view') from every feed's view settings."""
    setting_type = request.POST.get('view_setting_type')
    # Single-letter key each clearable type maps to (None -> nothing cleared).
    target_key = {'layout': 'l', 'view': 'v'}.get(setting_type)
    view_settings = json.decode(request.user.profile.view_settings)
    new_view_settings = {}
    removed = 0
    for feed_id, setting in view_settings.items():
        if target_key is not None and target_key in setting:
            del setting[target_key]
            removed += 1
        new_view_settings[feed_id] = setting
    request.user.profile.view_settings = json.encode(new_view_settings)
    request.user.profile.save()
    logging.user(request, "~FMClearing view settings: %s (found %s)" % (setting_type, removed))
    return dict(code=1, view_settings=view_settings, removed=removed)
@ajax_login_required
@json.json_view
def get_view_setting(request):
    """Return the stored view settings for one feed."""
    settings_map = json.decode(request.user.profile.view_settings)
    return dict(code=1, payload=settings_map.get(request.POST['feed_id']))
@ajax_login_required
@require_POST
@json.json_view
def set_collapsed_folders(request):
    """Persist the user's collapsed-folder list (opaque JSON string from the client)."""
    collapsed = request.POST['collapsed_folders']
    profile = request.user.profile
    profile.collapsed_folders = collapsed
    profile.save()
    logging.user(request, "~FMCollapsing folder: %s" % collapsed)
    return dict(code=1)
@ajax_login_required
def paypal_form(request):
    """Render the hidden PayPal subscribe-button form for a premium signup."""
    domain = Site.objects.get_current().domain
    # PayPal "Website Payments Standard" subscription parameters.
    paypal_dict = {
        "cmd": "_xclick-subscriptions",
        "business": "samuel@ofbrooklyn.com",
        "a3": "12.00", # price
        "p3": 1, # duration of each unit (depends on unit)
        "t3": "Y", # duration unit ("M for Month")
        "src": "1", # make payments recur
        "sra": "1", # reattempt payment on payment error
        "no_note": "1", # remove extra notes (optional)
        "item_name": "NewsBlur Premium Account",
        "notify_url": "http://%s%s" % (domain, reverse('paypal-ipn')),
        "return_url": "http://%s%s" % (domain, reverse('paypal-return')),
        "cancel_return": "http://%s%s" % (domain, reverse('index')),
        # `custom` round-trips through PayPal so the IPN can find the user.
        "custom": request.user.username,
    }
    # Create the instance.
    form = PayPalPaymentsForm(initial=paypal_dict, button_type="subscribe")
    logging.user(request, "~FBLoading paypal/feedchooser")
    # Output the button.
    return HttpResponse(form.render(), mimetype='text/html')
def paypal_return(request):
    """Landing page shown after a completed PayPal checkout."""
    context = RequestContext(request)
    return render_to_response('reader/paypal_return.xhtml', {},
                              context_instance=context)
@login_required
def activate_premium(request):
    """Endpoint that simply bounces the user back to the index page."""
    index_url = reverse('index')
    return HttpResponseRedirect(index_url)
@ajax_login_required
@json.json_view
def profile_is_premium(request):
    """Poll endpoint used after payment: is the user premium yet?

    After 30 polls it gives up waiting: emails the admins if activation
    failed, then force-marks the account premium anyway.
    """
    # Check tries
    code = 0
    retries = int(request.GET['retries'])
    profile = Profile.objects.get(user=request.user)
    subs = UserSubscription.objects.filter(user=request.user)
    total_subs = subs.count()
    activated_subs = subs.filter(active=True).count()
    if retries >= 30:
        # code -1 tells the client to stop retrying.
        code = -1
        if not request.user.profile.is_premium:
            subject = "Premium activation failed: %s (%s/%s)" % (request.user, activated_subs, total_subs)
            message = """User: %s (%s) -- Email: %s""" % (request.user.username, request.user.pk, request.user.email)
            mail_admins(subject, message, fail_silently=True)
        # Force premium on regardless, so the paying user isn't left in limbo.
        request.user.profile.is_premium = True
        request.user.profile.save()
    # NOTE(review): `profile` was fetched before the force-set above, so on
    # the forcing request the response may still report is_premium=False.
    return {
        'is_premium': profile.is_premium,
        'code': code,
        'activated_subs': activated_subs,
        'total_subs': total_subs,
    }
@login_required
def stripe_form(request):
    """Stripe credit-card form: new premium signup or existing-card update.

    GET renders the form (pre-selecting ?plan=N, 1-based into PLANS);
    POST either swaps the default card on an existing Stripe customer or
    creates a new customer/subscription.
    """
    user = request.user
    success_updating = False
    stripe.api_key = settings.STRIPE_SECRET
    plan = int(request.GET.get('plan', 2))
    # Map the 1-based plan index onto the plan's identifier string.
    plan = PLANS[plan-1][0]
    error = None
    if request.method == 'POST':
        zebra_form = StripePlusPaymentForm(request.POST, email=user.email)
        if zebra_form.is_valid():
            user.email = zebra_form.cleaned_data['email']
            user.save()
            # Premium is "current" only if it has a future expiration date.
            current_premium = (user.profile.is_premium and
                               user.profile.premium_expire and
                               user.profile.premium_expire > datetime.datetime.now())
            # Are they changing their existing card?
            if user.profile.stripe_id and current_premium:
                customer = stripe.Customer.retrieve(user.profile.stripe_id)
                try:
                    card = customer.cards.create(card=zebra_form.cleaned_data['stripe_token'])
                except stripe.CardError:
                    error = "This card was declined."
                else:
                    customer.default_card = card.id
                    customer.save()
                    success_updating = True
            else:
                # New subscription: create a Stripe customer on the chosen plan.
                try:
                    customer = stripe.Customer.create(**{
                        'card': zebra_form.cleaned_data['stripe_token'],
                        'plan': zebra_form.cleaned_data['plan'],
                        'email': user.email,
                        'description': user.username,
                    })
                except stripe.CardError:
                    error = "This card was declined."
                else:
                    # NOTE(review): `strip_4_digits` looks like a typo for
                    # stripe_4_digits, but it must match the Profile field name.
                    user.profile.strip_4_digits = zebra_form.cleaned_data['last_4_digits']
                    user.profile.stripe_id = customer.id
                    user.profile.save()
                    user.profile.activate_premium() # TODO: Remove, because webhooks are slow
                    success_updating = True
    else:
        zebra_form = StripePlusPaymentForm(email=user.email, plan=plan)
    if success_updating:
        return render_to_response('reader/paypal_return.xhtml',
                                  {}, context_instance=RequestContext(request))
    # Show the user where they sit in the new-user activation queue.
    new_user_queue_count = RNewUserQueue.user_count()
    new_user_queue_position = RNewUserQueue.user_position(request.user.pk)
    new_user_queue_behind = 0
    if new_user_queue_position >= 0:
        new_user_queue_behind = new_user_queue_count - new_user_queue_position
        new_user_queue_position -= 1
    logging.user(request, "~BM~FBLoading Stripe form")
    return render_to_response('profile/stripe_form.xhtml',
        {
          'zebra_form': zebra_form,
          'publishable': settings.STRIPE_PUBLISHABLE,
          'success_updating': success_updating,
          'new_user_queue_count': new_user_queue_count - 1,
          'new_user_queue_position': new_user_queue_position,
          'new_user_queue_behind': new_user_queue_behind,
          'error': error,
        },
        context_instance=RequestContext(request)
    )
@render_to('reader/activities_module.xhtml')
def load_activities(request):
    """Render one page of the current (or anonymous) user's activity feed."""
    user = get_user(request)
    page = max(1, int(request.REQUEST.get('page', 1)))
    activities, has_next_page = MActivity.user(user.pk, page=page)
    return dict(activities=activities,
                page=page,
                has_next_page=has_next_page,
                username='You')
@ajax_login_required
@json.json_view
def payment_history(request):
    """Payment history plus account statistics; staff may query any user_id."""
    user = request.user
    if request.user.is_staff:
        # Staff can inspect another account via ?user_id=.
        user_id = request.REQUEST.get('user_id', request.user.pk)
        user = User.objects.get(pk=user_id)
    history = PaymentHistory.objects.filter(user=user)
    statistics = {
        "created_date": user.date_joined,
        "last_seen_date": user.profile.last_seen_on,
        "last_seen_ip": user.profile.last_seen_ip,
        "timezone": unicode(user.profile.timezone),
        "stripe_id": user.profile.stripe_id,
        "profile": user.profile,
        "feeds": UserSubscription.objects.filter(user=user).count(),
        "email": user.email,
        "read_story_count": RUserStory.read_story_count(user.pk),
        "feed_opens": UserSubscription.objects.filter(user=user).aggregate(sum=Sum('feed_opens'))['sum'],
        # How much intelligence training the user has done, by classifier type.
        "training": {
            'title': MClassifierTitle.objects.filter(user_id=user.pk).count(),
            'tag': MClassifierTag.objects.filter(user_id=user.pk).count(),
            'author': MClassifierAuthor.objects.filter(user_id=user.pk).count(),
            'feed': MClassifierFeed.objects.filter(user_id=user.pk).count(),
        }
    }
    return {
        'is_premium': user.profile.is_premium,
        'premium_expire': user.profile.premium_expire,
        'payments': history,
        'statistics': statistics,
    }
@ajax_login_required
@json.json_view
def cancel_premium(request):
    """Cancel the user's premium subscription; code 1 on success, -1 otherwise."""
    was_canceled = request.user.profile.cancel_premium()
    return {'code': 1 if was_canceled else -1}
@staff_member_required
@ajax_login_required
@json.json_view
def refund_premium(request):
    """Staff-only: refund a user's premium payment (Stripe or PayPal).

    On a provider error the exception object itself is returned in the
    ``refunded`` field (note it is truthy, so ``code`` stays 1).
    """
    user_id = request.REQUEST.get('user_id')
    partial = request.REQUEST.get('partial', False)
    user = User.objects.get(pk=user_id)
    try:
        refunded = user.profile.refund_premium(partial=partial)
    # `except X as e` is valid on Python 2.6+ and Python 3, unlike the
    # deprecated `except X, e` form used before.
    except stripe.InvalidRequestError as e:
        refunded = e
    except PayPalAPIResponseError as e:
        refunded = e
    return {'code': 1 if refunded else -1, 'refunded': refunded}
@staff_member_required
@ajax_login_required
@json.json_view
def upgrade_premium(request):
    """Staff-only: gift a premium upgrade to a user via a generated gift code."""
    target = User.objects.get(pk=request.REQUEST.get('user_id'))
    gifter = User.objects.get(username='samuel')
    gift = MGiftCode.add(gifting_user_id=gifter.pk,
                         receiving_user_id=target.pk)
    MRedeemedCode.redeem(target, gift.gift_code)
    return {'code': target.profile.is_premium}
@staff_member_required
@ajax_login_required
@json.json_view
def never_expire_premium(request):
    """Staff-only: clear the expiration date for an already-premium user."""
    user = User.objects.get(pk=request.REQUEST.get('user_id'))
    if not user.profile.is_premium:
        return {'code': -1}
    user.profile.premium_expire = None
    user.profile.save()
    return {'code': 1}
@staff_member_required
@ajax_login_required
@json.json_view
def update_payment_history(request):
    """Staff-only: rebuild a user's payment history without re-checking premium."""
    target = User.objects.get(pk=request.REQUEST.get('user_id'))
    target.profile.setup_premium_history(check_premium=False)
    return {'code': 1}
@login_required
@render_to('profile/delete_account.xhtml')
def delete_account(request):
    """Confirm-and-delete the logged-in user's account (POST with password)."""
    if request.method != 'POST':
        logging.user(request.user, "~BC~FRAttempting to delete ~SB%s~SN's account." %
                     request.user.username)
        return {'delete_form': DeleteAccountForm(user=request.user)}
    form = DeleteAccountForm(request.POST, user=request.user)
    if form.is_valid():
        logging.user(request.user, "~SK~BC~FRDeleting ~SB%s~SN's account." %
                     request.user.username)
        request.user.profile.delete_user(confirm=True)
        logout_user(request)
        return HttpResponseRedirect(reverse('index'))
    logging.user(request.user, "~BC~FRFailed attempt to delete ~SB%s~SN's account." %
                 request.user.username)
    return {'delete_form': form}
@render_to('profile/forgot_password.xhtml')
def forgot_password(request):
    """Email a password-reset link to the address the user submits."""
    if request.method != 'POST':
        logging.user(request.user, "~BC~FRAttempting to retrieve forgotton password.")
        return {'forgot_password_form': ForgotPasswordForm()}
    form = ForgotPasswordForm(request.POST)
    if form.is_valid():
        logging.user(request.user, "~BC~FRForgot password: ~SB%s" % request.POST['email'])
        try:
            user = User.objects.get(email__iexact=request.POST['email'])
        except User.MultipleObjectsReturned:
            # Duplicate emails exist; just take the first matching account.
            user = User.objects.filter(email__iexact=request.POST['email'])[0]
        user.profile.send_forgot_password_email()
        return HttpResponseRedirect(reverse('index'))
    logging.user(request.user, "~BC~FRFailed forgot password: ~SB%s~SN" %
                 request.POST['email'])
    return {'forgot_password_form': form}
@login_required
@render_to('profile/forgot_password_return.xhtml')
def forgot_password_return(request):
    """Set a new password after the user logged in via a reset link."""
    if request.method == 'POST':
        logging.user(request.user, "~BC~FRReseting ~SB%s~SN's password." %
                     request.user.username)
        request.user.set_password(request.POST.get('password', ''))
        request.user.save()
        return HttpResponseRedirect(reverse('index'))
    logging.user(request.user, "~BC~FRAttempting to reset ~SB%s~SN's password." %
                 request.user.username)
    return {'forgot_password_return_form': ForgotPasswordReturnForm()}
@ajax_login_required
@json.json_view
def delete_starred_stories(request):
    """Delete stories starred on or before a unix timestamp (default: now)."""
    timestamp = request.POST.get('timestamp', None)
    cutoff = (datetime.datetime.fromtimestamp(int(timestamp))
              if timestamp else datetime.datetime.now())
    doomed = MStarredStory.objects.filter(user_id=request.user.pk,
                                          starred_date__lte=cutoff)
    stories_deleted = doomed.count()
    doomed.delete()
    # Refresh the cached per-tag/total starred counts.
    MStarredStoryCounts.count_for_user(request.user.pk, total_only=True)
    starred_counts, starred_count = MStarredStoryCounts.user_counts(request.user.pk,
                                                                    include_total=True)
    logging.user(request.user, "~BC~FRDeleting %s/%s starred stories (%s)" % (stories_deleted,
                 stories_deleted+starred_count, cutoff))
    return dict(code=1, stories_deleted=stories_deleted, starred_counts=starred_counts,
                starred_count=starred_count)
@ajax_login_required
@json.json_view
def delete_all_sites(request):
    """Unsubscribe the user from every site (after emailing an OPML backup)."""
    request.user.profile.send_opml_export_email(reason="You have deleted all of your sites, so here's a backup just in case.")
    subscriptions = UserSubscription.objects.filter(user=request.user)
    sub_count = subscriptions.count()
    subscriptions.delete()
    folders = UserSubscriptionFolders.objects.get(user=request.user)
    folders.folders = '[]'
    folders.save()
    logging.user(request.user, "~BC~FRDeleting %s sites" % sub_count)
    return dict(code=1)
@login_required
@render_to('profile/email_optout.xhtml')
def email_optout(request):
    """One-click unsubscribe from NewsBlur emails."""
    profile = request.user.profile
    profile.send_emails = False
    profile.save()
    return {"user": request.user}
| {
"content_hash": "ec3b4076f982942c3266696695f413ff",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 126,
"avg_line_length": 37.36842105263158,
"alnum_prop": 0.6384683098591549,
"repo_name": "slava-sh/NewsBlur",
"id": "33fe209030d16b5261230b8d1365dd26cb0e1e84",
"size": "22720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/profile/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4431"
},
{
"name": "C++",
"bytes": "2926"
},
{
"name": "CSS",
"bytes": "674585"
},
{
"name": "CoffeeScript",
"bytes": "6451"
},
{
"name": "HTML",
"bytes": "265992"
},
{
"name": "Java",
"bytes": "696119"
},
{
"name": "JavaScript",
"bytes": "1561094"
},
{
"name": "M",
"bytes": "47696"
},
{
"name": "Nginx",
"bytes": "897"
},
{
"name": "Objective-C",
"bytes": "3716549"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "2374227"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "870"
},
{
"name": "Shell",
"bytes": "40018"
}
],
"symlink_target": ""
} |
from __future__ import division
import os
import subprocess
from tempfile import TemporaryFile, NamedTemporaryFile
import wave
import sys
from .logging_utils import log_conversion
try:
from StringIO import StringIO
except:
from io import StringIO, BytesIO
from .utils import (
_fd_or_path_or_tempfile,
db_to_float,
ratio_to_db,
get_encoder_name,
audioop,
)
from .exceptions import (
TooManyMissingFrames,
InvalidDuration,
InvalidID3TagVersion,
InvalidTag,
CouldntDecodeError,
)
# Python 3 compatibility shims: map the Python 2 names used throughout
# this module onto their Python 3 equivalents.
if sys.version_info >= (3, 0):
    basestring = str
    xrange = range
    StringIO = BytesIO
class ClassPropertyDescriptor(object):
    """Descriptor implementing a property whose getter/setter receive the class.

    Used via the `classproperty` helper; supports `@prop.setter` like a
    normal property, but the wrapped callables are classmethods.
    """

    def __init__(self, fget, fset=None):
        self.fget = fget
        self.fset = fset

    def __get__(self, obj, klass=None):
        owner = klass if klass is not None else type(obj)
        return self.fget.__get__(obj, owner)()

    def __set__(self, obj, value):
        if self.fset is None:
            raise AttributeError("can't set attribute")
        owner = type(obj)
        return self.fset.__get__(obj, owner)(value)

    def setter(self, func):
        # Accept plain functions and wrap them, like @classmethod would.
        if not isinstance(func, (classmethod, staticmethod)):
            func = classmethod(func)
        self.fset = func
        return self
def classproperty(func):
    """Decorator: like @property, but the getter receives the class instead of an instance."""
    if isinstance(func, (classmethod, staticmethod)):
        return ClassPropertyDescriptor(func)
    return ClassPropertyDescriptor(classmethod(func))
# File-extension spellings that map onto a different ffmpeg/avconv format
# name than the extension itself.
AUDIO_FILE_EXT_ALIASES = {
    "m4a": "mp4",
    "wave": "wav",
}
class AudioSegment(object):
    """
    AudioSegments are *immutable* objects representing segments of audio
    that can be manipulated using python code.
    AudioSegments are slicable using milliseconds.
    for example:
        a = AudioSegment.from_mp3(mp3file)
        first_second = a[:1000] # get the first second of an mp3
        slice = a[5000:10000] # get a slice from 5 to 10 seconds of an mp3
    """
    # Name/path of the external conversion tool used for decode/encode.
    converter = get_encoder_name()  # either ffmpeg or avconv
    # TODO: remove in 1.0 release
    # maintain backwards compatibility for ffmpeg attr (now called converter)
    @classproperty
    def ffmpeg(cls):
        return cls.converter
    @ffmpeg.setter
    def ffmpeg(cls, val):
        cls.converter = val
    # Codec implied by an output format when the caller doesn't specify one.
    DEFAULT_CODECS = {
        "ogg": "libvorbis"
    }
    def __init__(self, data=None, *args, **kwargs):
        # Two construction paths: (1) internal, with precomputed metadata
        # passed via the `metadata` kwarg; (2) public, from raw wav bytes
        # or a file-like object containing wav data.
        if kwargs.get('metadata', False):
            # internal use only
            self._data = data
            for attr, val in kwargs.pop('metadata').items():
                setattr(self, attr, val)
        else:
            # normal construction
            data = data if isinstance(data, basestring) else data.read()
            raw = wave.open(StringIO(data), 'rb')
            raw.rewind()
            self.channels = raw.getnchannels()
            self.sample_width = raw.getsampwidth()
            self.frame_rate = raw.getframerate()
            # bytes per frame across all channels
            self.frame_width = self.channels * self.sample_width
            raw.rewind()
            # NOTE(review): float('inf') as a frame count relies on the
            # wave module accepting a non-int -- confirm on target Pythons.
            self._data = raw.readframes(float('inf'))
        super(AudioSegment, self).__init__(*args, **kwargs)
def __len__(self):
"""
returns the length of this audio segment in milliseconds
"""
return round(1000 * (self.frame_count() / self.frame_rate))
def __eq__(self, other):
try:
return self._data == other._data
except:
return False
def __ne__(self, other):
return not (self == other)
def __iter__(self):
return (self[i] for i in xrange(len(self)))
    def __getitem__(self, millisecond):
        """Slice by milliseconds: seg[a:b] returns that span; seg[i] returns 1 ms."""
        if isinstance(millisecond, slice):
            start = millisecond.start if millisecond.start is not None else 0
            end = millisecond.stop if millisecond.stop is not None \
                else len(self)
            start = min(start, len(self))
            end = min(end, len(self))
        else:
            start = millisecond
            end = millisecond + 1
        # convert millisecond positions into byte offsets into _data
        start = self._parse_position(start) * self.frame_width
        end = self._parse_position(end) * self.frame_width
        data = self._data[start:end]
        # ensure the output is as long as the requester is expecting
        expected_length = end - start
        missing_frames = (expected_length - len(data)) // self.frame_width
        if missing_frames:
            if missing_frames > self.frame_count(ms=2):
                raise TooManyMissingFrames(
                    "You should never be filling in "
                    " more than 2 ms with silence here, "
                    "missing frames: %s" % missing_frames)
            # pad the tail with one zeroed frame, repeated
            silence = audioop.mul(data[:self.frame_width],
                                  self.sample_width, 0)
            data += (silence * missing_frames)
        return self._spawn(data)
def get_sample_slice(self, start_sample=None, end_sample=None):
"""
Get a section of the audio segment by sample index.
NOTE: Negative indices do *not* address samples backword
from the end of the audio segment like a python list.
This is intentional.
"""
max_val = int(self.frame_count())
def bounded(val, default):
if val is None:
return default
if val < 0:
return 0
if val > max_val:
return max_val
return val
start_i = bounded(start_sample, 0) * self.frame_width
end_i = bounded(end_sample, max_val) * self.frame_width
data = self._data[start_i:end_i]
return self._spawn(data)
def __add__(self, arg):
if isinstance(arg, AudioSegment):
return self.append(arg, crossfade=0)
else:
return self.apply_gain(arg)
def __sub__(self, arg):
if isinstance(arg, AudioSegment):
raise TypeError("AudioSegment objects can't be subtracted from "
"each other")
else:
return self.apply_gain(-arg)
def __mul__(self, arg):
"""
If the argument is an AudioSegment, overlay the multiplied audio
segment.
If it's a number, just use the string multiply operation to repeat the
audio.
The following would return an AudioSegment that contains the
audio of audio_seg eight times
`audio_seg * 8`
"""
if isinstance(arg, AudioSegment):
return self.overlay(arg, position=0, loop=True)
else:
return self._spawn(data=self._data * arg)
def _spawn(self, data, overrides={}):
"""
Creates a new audio segment using the metadata from the current one
and the data passed in. Should be used whenever an AudioSegment is
being returned by an operation that would alters the current one,
since AudioSegment objects are immutable.
"""
# accept lists of data chunks
if isinstance(data, list):
data = b''.join(data)
# accept file-like objects
if hasattr(data, 'read'):
if hasattr(data, 'seek'):
data.seek(0)
data = data.read()
metadata = {
'sample_width': self.sample_width,
'frame_rate': self.frame_rate,
'frame_width': self.frame_width,
'channels': self.channels
}
metadata.update(overrides)
return AudioSegment(data=data, metadata=metadata)
@classmethod
def _sync(cls, seg1, seg2):
s1_len, s2_len = len(seg1), len(seg2)
channels = max(seg1.channels, seg2.channels)
seg1 = seg1.set_channels(channels)
seg2 = seg2.set_channels(channels)
frame_rate = max(seg1.frame_rate, seg2.frame_rate)
seg1 = seg1.set_frame_rate(frame_rate)
seg2 = seg2.set_frame_rate(frame_rate)
sample_width = max(seg1.sample_width, seg2.sample_width)
seg1 = seg1.set_sample_width(sample_width)
seg2 = seg2.set_sample_width(sample_width)
assert(len(seg1) == s1_len)
assert(len(seg2) == s2_len)
return seg1, seg2
def _parse_position(self, val):
if val < 0:
val = len(self) - abs(val)
val = self.frame_count(ms=len(self)) if val == float("inf") else \
self.frame_count(ms=val)
return int(val)
@classmethod
def empty(cls):
return cls(b'', metadata={
"channels": 1,
"sample_width": 1,
"frame_rate": 1,
"frame_width": 1
})
@classmethod
def silent(cls, duration=1000):
"""
Generate a silent audio segment.
duration specified in milliseconds (default: 1000ms).
"""
# lowest frame rate I've seen in actual use
frame_rate = 11025
frames = int(frame_rate * (duration / 1000.0))
data = b"\0\0" * frames
return cls(data, metadata={"channels": 1,
"sample_width": 2,
"frame_rate": frame_rate,
"frame_width": 2})
@classmethod
def from_file(cls, file, format=None):
orig_file = file
file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False)
if format:
format = AUDIO_FILE_EXT_ALIASES.get(format, format)
if format == "wav" or (isinstance(orig_file, basestring) and orig_file.endswith(".wav")):
try:
return cls._from_safe_wav(file)
except:
file.seek(0)
input_file = NamedTemporaryFile(mode='wb', delete=False)
input_file.write(file.read())
input_file.flush()
output = NamedTemporaryFile(mode="rb", delete=False)
convertion_command = [cls.converter,
'-y', # always overwrite existing files
]
# If format is not defined
# ffmpeg/avconv will detect it automatically
if format:
convertion_command += ["-f", format]
convertion_command += [
"-i", input_file.name, # input_file options (filename last)
"-vn", # Drop any video streams if there are any
"-f", "wav", # output options (filename last)
output.name
]
log_conversion(convertion_command)
retcode = subprocess.call(convertion_command, stderr=open(os.devnull))
if retcode != 0:
raise CouldntDecodeError("Decoding failed. ffmpeg returned error code: {0}".format(retcode))
obj = cls._from_safe_wav(output)
input_file.close()
output.close()
os.unlink(input_file.name)
os.unlink(output.name)
return obj
    # Convenience constructors: from_file() with an explicit source format.
    @classmethod
    def from_mp3(cls, file):
        return cls.from_file(file, 'mp3')
    @classmethod
    def from_flv(cls, file):
        return cls.from_file(file, 'flv')
    @classmethod
    def from_ogg(cls, file):
        return cls.from_file(file, 'ogg')
    @classmethod
    def from_wav(cls, file):
        return cls.from_file(file, 'wav')
@classmethod
def _from_safe_wav(cls, file):
file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False)
file.seek(0)
return cls(data=file)
def export(self, out_f=None, format='mp3', codec=None, bitrate=None, parameters=None, tags=None, id3v2_version='4'):
"""
Export an AudioSegment to a file with given options
out_f (string):
Path to destination audio file
format (string)
Format for destination audio file.
('mp3', 'wav', 'ogg' or other ffmpeg/avconv supported files)
codec (string)
Codec used to encoding for the destination.
bitrate (string)
Bitrate used when encoding destination file. (64, 92, 128, 256, 312k...)
Each codec accepts different bitrate arguments so take a look at the
ffmpeg documentation for details (bitrate usually shown as -b, -ba or
-a:b).
parameters (string)
Aditional ffmpeg/avconv parameters
tags (dict)
Set metadata information to destination files
usually used as tags. ({title='Song Title', artist='Song Artist'})
id3v2_version (string)
Set ID3v2 version for tags. (default: '4')
"""
id3v2_allowed_versions = ['3', '4']
out_f = _fd_or_path_or_tempfile(out_f, 'wb+')
out_f.seek(0)
# for wav output we can just write the data directly to out_f
if format == "wav":
data = out_f
else:
data = NamedTemporaryFile(mode="wb", delete=False)
wave_data = wave.open(data, 'wb')
wave_data.setnchannels(self.channels)
wave_data.setsampwidth(self.sample_width)
wave_data.setframerate(self.frame_rate)
# For some reason packing the wave header struct with
# a float in python 2 doesn't throw an exception
wave_data.setnframes(int(self.frame_count()))
wave_data.writeframesraw(self._data)
wave_data.close()
# for wav files, we're done (wav data is written directly to out_f)
if format == 'wav':
return out_f
output = NamedTemporaryFile(mode="w+b", delete=False)
# build converter command to export
convertion_command = [
self.converter,
'-y', # always overwrite existing files
"-f", "wav", "-i", data.name, # input options (filename last)
]
if codec is None:
codec = self.DEFAULT_CODECS.get(format, None)
if codec is not None:
# force audio encoder
convertion_command.extend(["-acodec", codec])
if bitrate is not None:
convertion_command.extend(["-b:a", bitrate])
if parameters is not None:
# extend arguments with arbitrary set
convertion_command.extend(parameters)
if tags is not None:
if not isinstance(tags, dict):
raise InvalidTag("Tags must be a dictionary.")
else:
# Extend converter command with tags
# print(tags)
for key, value in tags.items():
convertion_command.extend(
['-metadata', '{0}={1}'.format(key, value)])
if format == 'mp3':
# set id3v2 tag version
if id3v2_version not in id3v2_allowed_versions:
raise InvalidID3TagVersion(
"id3v2_version not allowed, allowed versions: %s" % id3v2_allowed_versions)
convertion_command.extend([
"-id3v2_version", id3v2_version
])
convertion_command.extend([
"-f", format, output.name, # output options (filename last)
])
log_conversion(convertion_command)
# read stdin / write stdout
subprocess.call(convertion_command,
# make converter shut up
stderr=open(os.devnull)
)
output.seek(0)
out_f.write(output.read())
data.close()
output.close()
os.unlink(data.name)
os.unlink(output.name)
out_f.seek(0)
return out_f
def get_frame(self, index):
frame_start = index * self.frame_width
frame_end = frame_start + self.frame_width
return self._data[frame_start:frame_end]
def frame_count(self, ms=None):
"""
returns the number of frames for the given number of milliseconds, or
if not specified, the number of frames in the whole AudioSegment
"""
if ms is not None:
return ms * (self.frame_rate / 1000.0)
else:
return float(len(self._data) // self.frame_width)
def set_sample_width(self, sample_width):
if sample_width == self.sample_width:
return self
data = self._data
if self.sample_width == 1:
data = audioop.bias(data, 1, -128)
if data:
data = audioop.lin2lin(data, self.sample_width, sample_width)
if sample_width == 1:
data = audioop.bias(data, 1, 128)
frame_width = self.channels * sample_width
return self._spawn(data, overrides={'sample_width': sample_width,
'frame_width': frame_width})
def set_frame_rate(self, frame_rate):
if frame_rate == self.frame_rate:
return self
if self._data:
converted, _ = audioop.ratecv(self._data, self.sample_width,
self.channels, self.frame_rate,
frame_rate, None)
else:
converted = self._data
return self._spawn(data=converted,
overrides={'frame_rate': frame_rate})
def set_channels(self, channels):
if channels == self.channels:
return self
if channels == 2 and self.channels == 1:
fn = audioop.tostereo
frame_width = self.frame_width * 2
elif channels == 1 and self.channels == 2:
fn = audioop.tomono
frame_width = self.frame_width // 2
converted = fn(self._data, self.sample_width, 1, 1)
return self._spawn(data=converted,
overrides={
'channels': channels,
'frame_width': frame_width})
def split_to_mono(self):
if self.channels == 1:
return [self]
left_channel = audioop.tomono(self._data, self.sample_width, 1, 0)
right_channel = audioop.tomono(self._data, self.sample_width, 0, 1)
return [self._spawn(data=left_channel,
overrides={'channels': 1,
'frame_width': self.sample_width}),
self._spawn(data=right_channel,
overrides={'channels': 1,
'frame_width': self.sample_width})]
@property
def rms(self):
if self.sample_width == 1:
return self.set_sample_width(2).rms
else:
return audioop.rms(self._data, self.sample_width)
@property
def dBFS(self):
rms = self.rms
if not rms:
return - float("infinity")
return ratio_to_db(self.rms / self.max_possible_amplitude)
    @property
    def max(self):
        # Peak absolute sample value in the segment (audioop.max).
        return audioop.max(self._data, self.sample_width)
@property
def max_possible_amplitude(self):
bits = self.sample_width * 8
max_possible_val = (2 ** bits)
# since half is above 0 and half is below the max amplitude is divided
return max_possible_val / 2
    @property
    def max_dBFS(self):
        # Peak amplitude expressed in dB relative to full scale.
        return ratio_to_db(self.max, self.max_possible_amplitude)
@property
def duration_seconds(self):
return self.frame_rate and self.frame_count() / self.frame_rate or 0.0
    def apply_gain(self, volume_change):
        # Scale every sample by the linear factor for `volume_change` dB.
        return self._spawn(data=audioop.mul(self._data, self.sample_width,
                                            db_to_float(float(volume_change))))
    def overlay(self, seg, position=0, loop=False, times=None):
        """
        Overlay the provided segment on to this segment starting at the
        specified position and using the specified looping behavior.
        seg (AudioSegment):
            The audio segment to overlay on to this one.
        position (optional int):
            The position to start overlaying the provided segment in to this
            one.
        loop (optional bool):
            Loop seg as many times as necessary to match this segment's length.
            Overrides loops param.
        times (optional int):
            Loop seg the specified number of times or until it matches this
            segment's length. 1 means once, 2 means twice, ... 0 would make the
            call a no-op
        """
        if loop:
            # match loop=True's behavior with new times (count) mechanism.
            # -1 never reaches 0 by decrementing, so we loop until the end.
            times = -1
        elif times is None:
            # no times specified, just once through
            times = 1
        elif times == 0:
            # it's a no-op, make a copy since we never mutate
            return self._spawn(self._data)
        output = StringIO()
        seg1, seg2 = AudioSegment._sync(self, seg)
        sample_width = seg1.sample_width
        spawn = seg1._spawn
        # untouched prefix before the overlay starts
        output.write(seg1[:position]._data)
        # drop down to the raw data
        seg1 = seg1[position:]._data
        seg2 = seg2._data
        pos = 0
        seg1_len = len(seg1)
        seg2_len = len(seg2)
        while times:
            remaining = max(0, seg1_len - pos)
            if seg2_len >= remaining:
                # clip the overlay to what's left of seg1
                seg2 = seg2[:remaining]
                seg2_len = remaining
                # we've hit the end, we're done looping (if we were) and this
                # is our last go-around
                times = 1
            # mix one pass of seg2 into the corresponding span of seg1
            output.write(audioop.add(seg1[pos:pos + seg2_len], seg2,
                                     sample_width))
            pos += seg2_len
            # dec times to break our while loop (eventually)
            times -= 1
        # remainder of seg1 past the overlaid span, unchanged
        output.write(seg1[pos:])
        return spawn(data=output)
    def append(self, seg, crossfade=100):
        """
        Concatenate *seg* onto this segment with a *crossfade* ms overlap
        (crossfade=0 is a plain byte-level concatenation).
        """
        seg1, seg2 = AudioSegment._sync(self, seg)
        if not crossfade:
            return seg1._spawn(seg1._data + seg2._data)
        # fade the tail of seg1 out while the head of seg2 fades in, then
        # combine the two faded chunks.
        # NOTE(review): `*=` relies on this class's __mul__, defined elsewhere
        # -- presumably an overlay; confirm.
        xf = seg1[-crossfade:].fade(to_gain=-120, start=0, end=float('inf'))
        xf *= seg2[:crossfade].fade(from_gain=-120, start=0, end=float('inf'))
        output = TemporaryFile()
        output.write(seg1[:-crossfade]._data)
        output.write(xf._data)
        output.write(seg2[crossfade:]._data)
        output.seek(0)
        return seg1._spawn(data=output)
    def fade(self, to_gain=0, from_gain=0, start=None, end=None,
             duration=None):
        """
        Fade the volume of this audio segment.
        to_gain (float):
            resulting volume_change in db
        start (int):
            default = beginning of the segment
            when in this segment to start fading in milliseconds
        end (int):
            default = end of the segment
            when in this segment to start fading in milliseconds
        duration (int):
            default = until the end of the audio segment
            the duration of the fade
        """
        # start/end/duration are mutually constraining: any two determine
        # the third, so all three together is rejected.
        if None not in [duration, end, start]:
            raise TypeError('Only two of the three arguments, "start", '
                            '"end", and "duration" may be specified')
        # no fade == the same audio
        if to_gain == 0 and from_gain == 0:
            return self
        # clamp to the segment length, then resolve negative (from-the-end)
        # positions
        start = min(len(self), start) if start is not None else None
        end = min(len(self), end) if end is not None else None
        if start is not None and start < 0:
            start += len(self)
        if end is not None and end < 0:
            end += len(self)
        if duration is not None and duration < 0:
            raise InvalidDuration("duration must be a positive integer")
        if duration:
            if start is not None:
                end = start + duration
            elif end is not None:
                start = end - duration
        else:
            duration = end - start
        from_power = db_to_float(from_gain)
        output = []
        # original data - up until the crossfade portion, as is
        before_fade = self[:start]._data
        if from_gain != 0:
            before_fade = audioop.mul(before_fade,
                                      self.sample_width,
                                      from_power)
        output.append(before_fade)
        gain_delta = db_to_float(to_gain) - from_power
        # fades longer than 100ms can use coarse fading (one gain step per ms),
        # shorter fades will have audible clicks so they use precise fading
        #(one gain step per sample)
        if duration > 100:
            scale_step = gain_delta / duration
            for i in range(duration):
                volume_change = from_power + (scale_step * i)
                chunk = self[start + i]
                chunk = audioop.mul(chunk._data,
                                    self.sample_width,
                                    volume_change)
                output.append(chunk)
        else:
            start_frame = self.frame_count(ms=start)
            end_frame = self.frame_count(ms=end)
            fade_frames = end_frame - start_frame
            scale_step = gain_delta / fade_frames
            for i in range(int(fade_frames)):
                volume_change = from_power + (scale_step * i)
                sample = self.get_frame(int(start_frame + i))
                sample = audioop.mul(sample, self.sample_width, volume_change)
                output.append(sample)
        # original data after the crossfade portion, at the new volume
        after_fade = self[end:]._data
        if to_gain != 0:
            after_fade = audioop.mul(after_fade,
                                     self.sample_width,
                                     db_to_float(to_gain))
        output.append(after_fade)
        return self._spawn(data=output)
    def fade_out(self, duration):
        # fade down to silence (-120 dB) over the final `duration` ms
        return self.fade(to_gain=-120, duration=duration, end=float('inf'))
    def fade_in(self, duration):
        # fade up from silence (-120 dB) over the first `duration` ms
        return self.fade(from_gain=-120, duration=duration, start=0)
    def reverse(self):
        # sample-accurate reversal of the raw frame data
        return self._spawn(
            data=audioop.reverse(self._data, self.sample_width)
        )
from . import effects
| {
"content_hash": "0c52981785d12e4426843d8b2ac9c10b",
"timestamp": "",
"source": "github",
"line_count": 811,
"max_line_length": 120,
"avg_line_length": 32.07521578298397,
"alnum_prop": 0.5496866951139815,
"repo_name": "cbelth/pyMusic",
"id": "6bcbb4d761b5719f0ff6ef74e3c81f3f08f959ff",
"size": "26013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydub/audio_segment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94589"
}
],
"symlink_target": ""
} |
"""Package for GDM."""
import sys
__project__ = 'GDM'
__version__ = '0.3dev'
CLI = 'gdm'
VERSION = __project__ + '-' + __version__
DESCRIPTION = 'A very basic language-agnostic "dependency manager" using Git.'
PYTHON_VERSION = 3, 3
if not sys.version_info >= PYTHON_VERSION: # pragma: no cover (manual test)
exit("Python {}.{}+ is required.".format(*PYTHON_VERSION))
try:
from .commands import install, uninstall
except ImportError: # pragma: no cover (manual test)
pass
| {
"content_hash": "9b279e5a44df0de42070453598ae24a4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 24.55,
"alnum_prop": 0.6578411405295316,
"repo_name": "jacebrowning/gdm-demo",
"id": "0cd8035230d2edd1c9826831642296ae2139021c",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7060"
},
{
"name": "Python",
"bytes": "36708"
}
],
"symlink_target": ""
} |
import json
from django.views.generic import View
from django import http
from django.shortcuts import get_object_or_404
from django import forms
from trix.trix_core import models
class HowSolvedForm(forms.ModelForm):
    # Minimal ModelForm used only to validate the `howsolved` field of a
    # POSTed JSON payload.
    class Meta:
        model = models.HowSolved
        fields = ['howsolved']
class HowsolvedView(View):
    """
    View of how the assignment was solved.
    """
    # Only JSON POST (create/update) and DELETE are routed to this view.
    http_method_names = ['post', 'delete']
    def _bad_request_response(self, data):
        # 400 with a JSON-encoded body.
        return http.HttpResponseBadRequest(json.dumps(data), content_type='application/json')
    def _not_found_response(self, data):
        # 404 with a JSON-encoded body.
        return http.HttpResponseNotFound(json.dumps(data), content_type='application/json')
    def _200_response(self, data):
        # 200 with a JSON-encoded body.
        return http.HttpResponse(json.dumps(data), content_type='application/json')
    def _get_assignment(self):
        # 404s when the assignment id from the URL kwargs does not exist.
        return get_object_or_404(models.Assignment, id=self.kwargs['assignment_id'])
    def _get_howsolved(self, assignment_id):
        # Raises HowSolved.DoesNotExist when the user has no entry yet.
        return models.HowSolved.objects\
            .filter(assignment_id=assignment_id, user=self.request.user)\
            .get()
    def post(self, request, **kwargs):
        # Create or update the current user's HowSolved for the assignment.
        try:
            data = json.loads(request.body)
        except ValueError:
            return self._bad_request_response({
                'error': 'Invalid JSON data.'
            })
        form = HowSolvedForm(data)
        if form.is_valid():
            howsolved = form.cleaned_data['howsolved']
            assignment = self._get_assignment()
            try:
                howsolvedobject = self._get_howsolved(assignment.id)
            except models.HowSolved.DoesNotExist:
                # first answer for this assignment: create it
                howsolvedobject = models.HowSolved.objects.create(
                    howsolved=howsolved,
                    assignment=assignment,
                    user=request.user)
            else:
                # existing answer: overwrite it
                howsolvedobject.howsolved = howsolved
                howsolvedobject.save()
            return self._200_response({'howsolved': howsolvedobject.howsolved})
        else:
            return self._bad_request_response({
                'error': form.errors.as_text()
            })
    def delete(self, request, **kwargs):
        # Remove the current user's HowSolved for the assignment, if any.
        try:
            howsolved = self._get_howsolved(self.kwargs['assignment_id'])
        except models.HowSolved.DoesNotExist:
            return self._not_found_response({
                'message': 'No HowSolved for this user and assignment.'
            })
        else:
            howsolved.delete()
            return self._200_response({'success': True})
| {
"content_hash": "342c8105b24bd4fe74f68d4998278ff8",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 93,
"avg_line_length": 33,
"alnum_prop": 0.6017871017871018,
"repo_name": "devilry/trix2",
"id": "e290a0a2d8589338d2c922be4d74a9044a7517c9",
"size": "2574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trix/trix_student/views/howsolved.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "10237"
},
{
"name": "HTML",
"bytes": "55900"
},
{
"name": "Less",
"bytes": "25084"
},
{
"name": "Procfile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "145725"
}
],
"symlink_target": ""
} |
from app.enums import FileCategory
from app.repository import alv_repository
from app.exceptions.base import ResourceNotFoundException
from app.service import file_service
from werkzeug.utils import secure_filename
def save_alv(alv):
    # Thin wrapper so callers never touch the repository layer directly.
    alv_repository.save(alv)
def add_minutes(alv, minutes_file):
    """Store *minutes_file* as an ALV document and attach it to *alv*."""
    stored = file_service.add_file(FileCategory.ALV_DOCUMENT,
                                   minutes_file, minutes_file.filename)
    alv.minutes_file_id = stored.id
    alv_repository.save(alv)
def find_all_alv():
    # Pure delegation to the repository layer.
    return alv_repository.find_all_alv()
def find_alv_by_id(alv_id, include_presidium, include_documents):
    # Pure delegation; presumably returns None on a miss (get_alv_by_id
    # checks for falsiness) -- confirm against the repository.
    return alv_repository.find_alv_by_id(alv_id,
                                         include_presidium=include_presidium,
                                         include_documents=include_documents)
def get_alv_by_id(alv_id, include_presidium=True, include_documents=False):
    """Fetch an ALV by id, raising ResourceNotFoundException on a miss."""
    alv = find_alv_by_id(alv_id, include_presidium, include_documents)
    if alv:
        return alv
    raise ResourceNotFoundException("alv", alv_id)
def find_alv_document_by_id(alv_document_id, include_versions):
    # Pure delegation to the repository layer.
    return alv_repository.find_alv_document_by_id(
        alv_document_id, include_versions)
def get_alv_document_by_id(alv_document_id, include_versions=False):
    """Fetch an ALV document, raising ResourceNotFoundException on a miss."""
    document = find_alv_document_by_id(alv_document_id, include_versions)
    if document:
        return document
    raise ResourceNotFoundException("alv document", alv_document_id)
def get_alv_document_version_filename(alv_document, version_number,
                                      _file, locale=None):
    """Build the download filename for a document version.

    NOTE(review): `_file` and `locale` are unused here but kept for
    interface compatibility with existing callers.
    """
    name = secure_filename(alv_document.get_localized_basename())
    if version_number > 1:
        name += "_v{}".format(version_number)
    return name
def get_alv_document_version_file(alv_document_version):
    # Resolve the stored file backing this document version.
    _file = file_service.get_file_by_id(alv_document_version.file_id)
    return _file
def get_alv_minutes_file(alv):
    # Resolve the stored file holding this ALV's minutes.
    _file = file_service.get_file_by_id(alv.minutes_file_id)
    return _file
def get_alv_minutes_filename(alv, _file):
    """Build the download filename for an ALV's minutes file."""
    safe_base = secure_filename(alv.get_localized_basename())
    name = "{}_minutes".format(safe_base)
    if len(_file.extension) > 0:
        name = "{}.{}".format(name, _file.extension)
    return name
def add_document(alv, file_storage, nl_name, en_name):
    """Create a new ALV document (with its first version) for *alv*."""
    document = alv_repository.create_document()
    document.alv = alv
    document.nl_name = nl_name
    document.en_name = en_name
    add_document_version(document, file_storage)
    alv_repository.save_document(document)
def add_document_version(alv_document, file_storage):
    """Store *file_storage* and register it as a new version of the document."""
    stored = file_service.add_file(FileCategory.ALV_DOCUMENT,
                                   file_storage, file_storage.filename)
    version = alv_repository.create_document_version()
    version.alv_document = alv_document
    version.file = stored
    alv_repository.save_document_version(version)
def update_document(alv_document, file_storage, nl_name, en_name):
    """Update a ALV document's names and add a new version."""
    alv_document.nl_name = nl_name
    alv_document.en_name = en_name
    # A new version is only created when a replacement file was uploaded.
    if file_storage:
        add_document_version(alv_document, file_storage)
    alv_repository.save_document(alv_document)
def delete_alv(alv):
    # Thin wrapper so callers never touch the repository layer directly.
    alv_repository.delete_alv(alv)
| {
"content_hash": "c9943b9262a299c268fbc848a2defb63",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 77,
"avg_line_length": 28.905982905982906,
"alnum_prop": 0.6759314015375517,
"repo_name": "viaict/viaduct",
"id": "606f72c382c5debf7ed3420d426314e256cfc3fa",
"size": "3382",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/service/alv_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1583078"
},
{
"name": "Dockerfile",
"bytes": "1131"
},
{
"name": "HTML",
"bytes": "227955"
},
{
"name": "JavaScript",
"bytes": "63026"
},
{
"name": "Makefile",
"bytes": "896"
},
{
"name": "Python",
"bytes": "770976"
},
{
"name": "Shell",
"bytes": "3004"
},
{
"name": "TypeScript",
"bytes": "3288"
},
{
"name": "Vue",
"bytes": "27869"
}
],
"symlink_target": ""
} |
from mock import patch
from .utils import unittest, ArgyleTest
from argyle import nginx
class NginxTest(ArgyleTest):
    "Base for setting up necessary patches."
    # ArgyleTest (see .utils) patches each name in `patched_commands` inside
    # the `package` module and exposes the resulting mocks via self.mocks.
    package = 'argyle.nginx'
    patched_commands = ['sudo', 'files', 'upload_template', 'restart_service', ]
class EnableDisableSitesTest(NginxTest):
    "Enabling and disabling site configurations."
    def test_remove_default_site(self):
        "Remove default site if it exists."
        # files.exists -> True simulates the default symlink being present
        self.mocks['files'].exists.return_value = True
        nginx.remove_default_site()
        self.assertSudoCommand('rm /etc/nginx/sites-enabled/default')
    def test_default_site_already_removed(self):
        "Ignore removing default site if it is already removed."
        self.mocks['files'].exists.return_value = False
        nginx.remove_default_site()
        self.assertFalse(self.mocks['sudo'].called)
    def test_enable_site(self):
        "Enable a site in sites-available."
        self.mocks['files'].exists.return_value = True
        nginx.enable_site('foo')
        self.assertSudoCommand('ln -s -f /etc/nginx/sites-available/foo /etc/nginx/sites-enabled/foo')
        # Restart should be called
        self.assertTrue(self.mocks['restart_service'].called)
    def test_enable_missing_site(self):
        "Abort if attempting to enable a site which is not available."
        self.mocks['files'].exists.return_value = False
        with patch('argyle.nginx.abort') as abort:
            nginx.enable_site('foo')
            self.assertTrue(abort.called)
        # Restart should not be called
        self.assertFalse(self.mocks['restart_service'].called)
    def test_disable_site(self):
        "Remove a site from sites-enabled."
        self.mocks['files'].exists.return_value = True
        nginx.disable_site('foo')
        self.assertSudoCommand('rm /etc/nginx/sites-enabled/foo')
        # Restart should be called
        self.assertTrue(self.mocks['restart_service'].called)
    def test_disable_site_already_removed(self):
        "Ignore removing a site if it is already removed."
        self.mocks['files'].exists.return_value = False
        nginx.disable_site('foo')
        self.assertFalse(self.mocks['sudo'].called)
        # Restart should not be called
        self.assertFalse(self.mocks['restart_service'].called)
class UploadSiteTest(NginxTest):
    "Upload site configuration via template."
    # NOTE(review): "Desination" below is the (misspelled) helper name as
    # defined on ArgyleTest; kept as-is.
    def test_default_upload(self):
        "Upload default site configuration."
        with patch('argyle.nginx.enable_site') as enable:
            nginx.upload_nginx_site_conf('test')
            # No additional context by default
            self.assertTemplateContext(None)
            # Upload template will look for templates in the given order
            self.assertTemplateUsed([u'nginx/test.conf', u'nginx/site.conf'])
            self.assertTemplateDesination('/etc/nginx/sites-available/test')
            # Site will be enabled by default
            self.assertTrue(enable.called)
    def test_explicit_template_name(self):
        "Override template name for upload."
        with patch('argyle.nginx.enable_site') as enable:
            nginx.upload_nginx_site_conf('test', template_name='test.conf')
            # Upload template will look for templates in the given order
            self.assertTemplateUsed('test.conf')
            self.assertTemplateDesination('/etc/nginx/sites-available/test')
    def test_additional_context(self):
        "Pass additional context to the template."
        with patch('argyle.nginx.enable_site') as enable:
            nginx.upload_nginx_site_conf('test', context={'foo': 'bar'})
            self.assertTemplateContext({'foo': 'bar'})
    def test_upload_without_enabling(self):
        "Upload site configuration but don't enable."
        with patch('argyle.nginx.enable_site') as enable:
            nginx.upload_nginx_site_conf('test', enable=False)
            self.assertFalse(enable.called)
if __name__ == '__main__':
    # Allow running this test module directly, without a test runner.
    unittest.main()
| {
"content_hash": "ef0e74fca773e21e95a72d3dca362ff8",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 102,
"avg_line_length": 40.2,
"alnum_prop": 0.6557213930348259,
"repo_name": "mlavin/argyle",
"id": "6a365c18bc486f3cf44cde0716d25bd235383662",
"size": "4020",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "argyle/tests/test_nginx.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "71211"
},
{
"name": "Ruby",
"bytes": "248"
},
{
"name": "Shell",
"bytes": "5096"
}
],
"symlink_target": ""
} |
from .utils import write_dict_to_json
from .utils import write_dict_to_csv
| {
"content_hash": "9eb30a832f8552672b64c309b48c03ca",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 37,
"avg_line_length": 37.5,
"alnum_prop": 0.7866666666666666,
"repo_name": "howsunjow/YahooFinance",
"id": "f00ffc2c56171d4945ddfad93ac5d0343b00fabb",
"size": "75",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yahoo_finance/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7372"
}
],
"symlink_target": ""
} |
""" Sahana Eden Inventory Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3InventoryModel",
           "S3TrackingModel",
           "S3AdjustModel",
           "inv_tabs",
           "inv_warehouse_rheader",
           "inv_recv_crud_strings",
           "inv_recv_rheader",
           "inv_send_rheader",
           # NOTE(review): "inv_recv_rheader" is listed twice in __all__.
           "inv_recv_rheader",
           "inv_ship_status",
           "inv_adj_rheader",
           ]
from gluon import *
from gluon.sqlhtml import RadioWidget
from gluon.storage import Storage
from ..s3 import *
# Shipment status codes, stored in the database as integers.
SHIP_STATUS_IN_PROCESS = 0
SHIP_STATUS_RECEIVED = 1
SHIP_STATUS_SENT = 2
SHIP_STATUS_CANCEL = 3
# To pass to global scope
inv_ship_status = {
    "IN_PROCESS" : SHIP_STATUS_IN_PROCESS,
    "RECEIVED" : SHIP_STATUS_RECEIVED,
    "SENT" : SHIP_STATUS_SENT,
    "CANCEL" : SHIP_STATUS_CANCEL,
}
T = current.T
# Human-readable (translated) labels for the status codes above.
shipment_status = { SHIP_STATUS_IN_PROCESS: T("In Process"),
                    SHIP_STATUS_RECEIVED: T("Received"),
                    SHIP_STATUS_SENT: T("Sent"),
                    SHIP_STATUS_CANCEL: T("Canceled") }
SHIP_DOC_PENDING = 0
SHIP_DOC_COMPLETE = 1
# Field labels for item-source / tracking numbers.
itn_label = T("Item Source Tracking Number")
# Overwrite the label until we have a better way to do this
itn_label = T("CTN")
tn_label = T("Tracking Number")
# =============================================================================
class S3InventoryModel(S3Model):
    """
        Inventory Management
        A module to record inventories of items at a location (site)
    """
    # NOTE(review): `names` appears to declare the tables/objects this model
    # class provides (S3Model convention) -- confirm against S3Model.
    names = ["inv_inv_item",
             "inv_item_id",
             "inv_item_represent",
             "inv_prep",
             ]
    def model(self):
        """
            Define the inv_inv_item table, its CRUD strings, its search and
            report configuration, and the reusable inv_item_id field.
        """
        T = current.T
        db = current.db
        auth = current.auth
        s3 = current.response.s3
        settings = current.deployment_settings
        org_id = self.org_organisation_id
        item_id = self.supply_item_entity_id
        supply_item_id = self.supply_item_id
        item_pack_id = self.supply_item_pack_id
        currency_type = s3.currency_type
        org_site_represent = self.org_site_represent
        item_pack_virtualfields = self.supply_item_pack_virtualfields
        s3_date_format = settings.get_L10n_date_format()
        s3_date_represent = lambda dt: S3DateTime.date_represent(dt, utc=True)
        # =====================================================================
        # Inventory Item
        #
        tablename = "inv_inv_item"
        # ondelete references have been set to RESTRICT because the stock items
        # should never be automatically deleted
        table = self.define_table(tablename,
                                  self.super_link("site_id",
                                                  "org_site",
                                                  label = T("Warehouse"),
                                                  default = auth.user.site_id if auth.is_logged_in() else None,
                                                  readable = True,
                                                  writable = True,
                                                  empty = False,
                                                  ondelete = "RESTRICT",
                                                  # Comment these to use a Dropdown & not an Autocomplete
                                                  #widget = S3SiteAutocompleteWidget(),
                                                  #comment = DIV(_class="tooltip",
                                                  #              _title="%s|%s" % (T("Inventory"),
                                                  #                                T("Enter some characters to bring up a list of possible matches"))),
                                                  represent=org_site_represent),
                                  item_id,
                                  supply_item_id(ondelete = "RESTRICT"),
                                  item_pack_id(ondelete = "RESTRICT"),
                                  Field("quantity",
                                        "double",
                                        label = T("Quantity"),
                                        notnull = True,
                                        requires = IS_FLOAT_IN_RANGE(0,None),
                                        writable = False),
                                  Field("pack_value",
                                        "double",
                                        label = T("Value per Pack")),
                                  # @ToDo: Move this into a Currency Widget for the pack_value field
                                  currency_type("currency"),
                                  #Field("pack_quantity",
                                  #      "double",
                                  #      compute = record_pack_quantity), # defined in 06_supply
                                  Field("expiry_date", "date",
                                        label = T("Expiry Date"),
                                        requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
                                        represent = s3_date_represent,
                                        widget = S3DateWidget()
                                        ),
                                  Field("bin",
                                        "string",
                                        length = 16,
                                        ),
                                  Field("item_source_no",
                                        "string",
                                        length = 16,
                                        label = itn_label,
                                        ),
                                  org_id(name = "supply_org_id",
                                         label = "Supplying Organization",
                                         ondelete = "SET NULL"), # original donating org
                                  # @ToDo: Allow items to be marked as 'still on the shelf but allocated to an outgoing shipment'
                                  #Field("status"),
                                  s3.comments(),
                                  *s3.meta_fields())
        # Computed (virtual) fields layered on top of the stored columns.
        table.virtualfields.append(item_pack_virtualfields(tablename=tablename))
        table.virtualfields.append(InvItemVirtualFields())
        # CRUD strings
        INV_ITEM = T("Warehouse Stock")
        ADD_INV_ITEM = T("Add Stock to Warehouse")
        LIST_INV_ITEMS = T("List Stock in Warehouse")
        s3.crud_strings[tablename] = Storage(
            title_create = ADD_INV_ITEM,
            title_display = T("Warehouse Stock Details"),
            title_list = LIST_INV_ITEMS,
            title_update = T("Edit Warehouse Stock"),
            title_search = T("Search Warehouse Stock"),
            title_upload = T("Import Warehouse Stock"),
            subtitle_create = ADD_INV_ITEM,
            subtitle_list = T("Warehouse Stock"),
            label_list_button = LIST_INV_ITEMS,
            label_create_button = ADD_INV_ITEM,
            label_delete_button = T("Remove Stock from Warehouse"),
            msg_record_created = T("Stock added to Warehouse"),
            msg_record_modified = T("Warehouse Stock updated"),
            msg_record_deleted = T("Stock removed from Warehouse"),
            msg_list_empty = T("No Stock currently registered in this Warehouse"))
        # Reusable Field
        inv_item_id = S3ReusableField("inv_item_id", db.inv_inv_item,
                                      requires = IS_ONE_OF(db,
                                                           "inv_inv_item.id",
                                                           self.inv_item_represent,
                                                           orderby="inv_inv_item.id",
                                                           sort=True),
                                      represent = self.inv_item_represent,
                                      label = INV_ITEM,
                                      comment = DIV( _class="tooltip",
                                                     _title="%s|%s" % (INV_ITEM,
                                                                       T("Select Stock from this Warehouse"))),
                                      ondelete = "CASCADE",
                                      script = SCRIPT("""
$(document).ready(function() {
    S3FilterFieldChange({
        'FilterField': 'inv_item_id',
        'Field': 'item_pack_id',
        'FieldResource': 'item_pack',
        'FieldPrefix': 'supply',
        'url': S3.Ap.concat('/inv/inv_item_packs/'),
        'msgNoRecords': S3.i18n.no_packs,
        'fncPrep': fncPrepItem,
        'fncRepresent': fncRepresentItem
    });
});"""),
                                      )
        # Search widgets (also reused as the report filter below).
        report_filter = [
            S3SearchSimpleWidget(
                name="inv_item_search_text",
                label=T("Search"),
                comment=T("Search for an item by text."),
                field=[ "item_id$name",
                        #"item_id$category_id$name",
                        #"site_id$name"
                        ]
            ),
            S3SearchOptionsWidget(
                name="recv_search_site",
                label=T("Facility"),
                field=["site_id"],
                represent ="%(name)s",
                comment=T("If none are selected, then all are searched."),
                cols = 2
            ),
            S3SearchMinMaxWidget(
                name="inv_item_search_expiry_date",
                method="range",
                label=T("Expiry Date"),
                field=["expiry_date"]
            )
        ]
        # Item Search Method (Advanced Search only)
        inv_item_search = S3Search(advanced=report_filter)
        self.configure(tablename,
                       super_entity = "supply_item_entity",
                       list_fields = ["id",
                                      # This is added in req/req_item_inv_item controller
                                      #"site_id",
                                      "item_id",
                                      (T("Code"), "item_code"),
                                      (T("Category"), "item_category"),
                                      "site_id",
                                      "quantity",
                                      "pack_value",
                                      (T("Total Value"), "total_value"),
                                      "currency"
                                      ],
                       onvalidation = self.inv_inv_item_onvalidate,
                       search_method = inv_item_search,
                       report_filter = report_filter,
                       report_rows = ["item_id","currency"],
                       report_cols = ["site_id","currency"],
                       report_fact = ["quantity", (T("Total Value"), "total_value")],
                       report_method=["sum"],
                       report_groupby = self.inv_inv_item.site_id,
                       report_hide_comments = True,
                       deduplicate = self.inv_item_duplicate
                       )
        # Component
        self.add_component("inv_track_item",
                           inv_inv_item="inv_item_id")
        # ---------------------------------------------------------------------
        # Pass variables back to global scope (response.s3.*)
        #
        return Storage(
                    inv_item_id = inv_item_id,
                    inv_item_represent = self.inv_item_represent,
                    inv_prep = self.inv_prep,
                )
# ---------------------------------------------------------------------
@staticmethod
def inv_inv_item_onvalidate(form):
"""
When a inv item record is being created with a source number
then the source number needs to be unique within the organisation.
"""
s3db = current.s3db
db = current.db
itable = s3db.inv_inv_item
stable = s3db.org_site
# If their is a tracking number check that it is unique within the org
if form.vars.item_source_no:
if form.record.item_source_no and form.record.item_source_no == form.vars.item_source_no:
# the tracking number hasn't changes so no validation needed
pass
else:
query = (itable.track_org_id == form.vars.track_org_id) & \
(itable.item_source_no == form.vars.item_source_no)
record = db(query).select(limitby=(0, 1)).first()
if record:
org_repr = current.response.s3.org_organisation_represent
form.errors.item_source_no = T("The Tracking Number %s is already used by %s.") % (form.vars.item_source_no,
org_repr(record.track_org_id))
    @staticmethod
    def inv_prep(r):
        """
            Used in site REST controllers to filter out items which are
            already in this inventory

            @param r: the S3Request of the site controller
        """
        if r.component:
            db = current.db
            s3db = current.s3db
            if r.component.name == "inv_item":
                table = s3db.inv_inv_item
                # Filter out items which are already in this inventory
                query = (table.site_id == r.record.site_id) & \
                        (table.deleted == False)
                inv_item_rows = db(query).select(table.item_id)
                item_ids = [row.item_id for row in inv_item_rows]
                # Ensure that the current item CAN be selected
                if r.method == "update":
                    # NOTE(review): assumes r.args[2] is the inv_item record ID
                    # of the record being updated - verify against URL scheme
                    item_ids.remove(table[r.args[2]].item_id)
                table.item_id.requires.set_filter(not_filterby = "id",
                                                  not_filter_opts = item_ids)
            elif r.component.name == "send":
                # Default to the Search tab in the location selector
                current.response.s3.gis.tab = "search"
                if current.request.get_vars.get("select", "sent") == "incoming":
                    # Display only incoming shipments which haven't been received yet
                    # NOTE(review): the filter is built but the line that would
                    # apply it is commented out, so it currently has no effect
                    filter = (s3db.inv_send.status == SHIP_STATUS_SENT)
                    #r.resource.add_component_filter("send", filter)
# ---------------------------------------------------------------------
@staticmethod
def inv_item_represent(id):
"""
"""
db = current.db
s3db = current.s3db
itable = s3db.inv_inv_item
stable = s3db.supply_item
query = (itable.id == id) & \
(itable.item_id == stable.id)
record = db(query).select(stable.name,
limitby = (0, 1)).first()
if record:
return record.name
else:
return None
    @staticmethod
    def inv_item_duplicate(job):
        """
            Rules for finding a duplicate:
            - Look for a record with the same site,
              bin,
              supply item and,
              pack item

            If an item is added as part of an inv_track_item import then the
            quantity will be set to zero. This will overwrite an existing
            total, if we have a duplicate. If the total was None then
            validation would fail (it's a not null field). So if a duplicate
            is found then the quantity needs to be removed.

            @param job: the import job to de-duplicate
        """
        if job.tablename == "inv_inv_item":
            table = job.table
            # NOTE(review): `"key" in job.data and job.data.key` yields False
            # when the key is absent, so the query then matches records where
            # the column equals False/None - verify this is intended
            site_id = "site_id" in job.data and job.data.site_id
            item_id = "item_id" in job.data and job.data.item_id
            pack_id = "item_pack_id" in job.data and job.data.item_pack_id
            bin = "bin" in job.data and job.data.bin
            query = (table.site_id == site_id) & \
                    (table.item_id == item_id) & \
                    (table.item_pack_id == pack_id) & \
                    (table.bin == bin)
            # duplicator() is defined elsewhere in this module: it resolves
            # the job against the query and returns the matched record ID
            id = duplicator(job, query)
            if id:
                # Keep the existing total when the import carries quantity=0
                if "quantity" in job.data and job.data.quantity == 0:
                    job.data.quantity = table[id].quantity
class S3TrackingModel(S3Model):
"""
A module to manage the shipment of inventory items
- Sent Items
- Received Items
- And audit trail of the shipment process
"""
names = ["inv_send",
"inv_send_represent",
"inv_recv",
"inv_recv_represent",
"inv_track_item",
]
    def model(self):
        """
            Defines the shipment-tracking tables:
              - inv_send: outgoing shipments
              - inv_recv: incoming shipments / orders / donations
              - inv_track_item: the items moved by a shipment

            @returns: a Storage of names passed back to global scope
                      (response.s3.*)

            NOTE(review): tn_label, itn_label, shipment_status and the
            SHIP_STATUS_* / SHIP_DOC_* constants are module-level names
            defined outside this chunk - verify they are in scope.
        """
        current.manager.load("inv_adj_item")
        T = current.T
        db = current.db
        auth = current.auth
        s3 = current.response.s3
        settings = current.deployment_settings
        person_id = self.pr_person_id
        org_id = self.org_organisation_id
        item_id = self.supply_item_id
        inv_item_id = self.inv_item_id
        item_pack_id = self.supply_item_pack_id
        currency_type = s3.currency_type
        req_item_id = self.req_item_id
        adj_item_id = self.adj_item_id
        item_pack_virtualfields = self.supply_item_pack_virtualfields
        org_site_represent = self.org_site_represent
        messages = current.messages
        NONE = messages.NONE
        UNKNOWN_OPT = messages.UNKNOWN_OPT
        s3_date_format = settings.get_L10n_date_format()
        s3_date_represent = lambda dt: S3DateTime.date_represent(dt, utc=True)
        # =====================================================================
        # Send (Outgoing / Dispatch / etc)
        #
        tablename = "inv_send"
        table = self.define_table("inv_send",
                                  Field("tracking_no",
                                        "string",
                                        label = tn_label,
                                        writable = False
                                        ),
                                  person_id(name = "sender_id",
                                            label = T("Sent By"),
                                            default = auth.s3_logged_in_person(),
                                            ondelete = "SET NULL",
                                            comment = self.pr_person_comment(child="sender_id")),
                                  self.super_link("site_id",
                                                  "org_site",
                                                  label = T("From Facility"),
                                                  default = auth.user.site_id if auth.is_logged_in() else None,
                                                  readable = True,
                                                  writable = True,
                                                  represent=org_site_represent,
                                                  ondelete = "SET NULL"
                                                  ),
                                  Field("date",
                                        "date",
                                        label = T("Date Sent"),
                                        writable = False,
                                        requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
                                        represent = s3_date_represent,
                                        widget = S3DateWidget()
                                        ),
                                  person_id(name = "recipient_id",
                                            label = T("To Person"),
                                            ondelete = "SET NULL",
                                            comment = self.pr_person_comment(child="recipient_id")),
                                  Field("delivery_date",
                                        "date",
                                        label = T("Est. Delivery Date"),
                                        requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
                                        represent = s3_date_represent,
                                        widget = S3DateWidget()
                                        ),
                                  Field("to_site_id",
                                        self.org_site,
                                        label = T("To Facility"),
                                        requires = IS_ONE_OF(db,
                                                             "org_site.site_id",
                                                             lambda id: org_site_represent(id, link = False),
                                                             sort=True,
                                                             ),
                                        ondelete = "SET NULL",
                                        represent = org_site_represent
                                        ),
                                  Field("status",
                                        "integer",
                                        requires = IS_NULL_OR(IS_IN_SET(shipment_status)),
                                        represent = lambda opt: shipment_status.get(opt, UNKNOWN_OPT),
                                        default = SHIP_STATUS_IN_PROCESS,
                                        label = T("Status"),
                                        writable = False,
                                        ),
                                  Field("vehicle_type",
                                        "string",
                                        label = T("Type of Vehicle"),
                                        ),
                                  Field("vehicle_plate_no",
                                        "string",
                                        label = T("Vehicle Plate Number"),
                                        ),
                                  Field("driver_name",
                                        "string",
                                        label = T("Name of Driver"),
                                        ),
                                  Field("time_in",
                                        "time",
                                        label = T("Time In"),
                                        ),
                                  Field("time_out",
                                        "time",
                                        label = T("Time Out"),
                                        ),
                                  s3.comments(),
                                  *s3.meta_fields())
        # CRUD strings
        ADD_SEND = T("Send Shipment")
        LIST_SEND = T("List Sent Shipments")
        s3.crud_strings[tablename] = Storage(
            title_create = ADD_SEND,
            title_display = T("Sent Shipment Details"),
            title_list = LIST_SEND,
            title_update = T("Shipment to Send"),
            title_search = T("Search Sent Shipments"),
            subtitle_create = ADD_SEND,
            subtitle_list = T("Sent Shipments"),
            label_list_button = LIST_SEND,
            label_create_button = ADD_SEND,
            label_delete_button = T("Delete Sent Shipment"),
            msg_record_created = T("Shipment Created"),
            msg_record_modified = T("Sent Shipment updated"),
            msg_record_deleted = T("Sent Shipment canceled"),
            msg_list_empty = T("No Sent Shipments"))
        # Reusable Field
        send_id = S3ReusableField( "send_id", db.inv_send, sortby="date",
                                   requires = IS_NULL_OR(IS_ONE_OF(db,
                                                                   "inv_send.id",
                                                                   self.inv_send_represent,
                                                                   orderby="inv_send_id.date",
                                                                   sort=True)),
                                   represent = self.inv_send_represent,
                                   label = T("Send Shipment"),
                                   ondelete = "RESTRICT")
        # it shouldn't be possible for the user to delete a send item
        # unless *maybe* if it is pending and has no items referencing it
        self.configure("inv_send",
                       deletable=False,
                       )
        # Component
        self.add_component("inv_track_item",
                           inv_send="send_id")
        # Generate Consignment Note
        self.set_method(tablename,
                        method="form",
                        action=self.inv_send_form )
        # Redirect to the Items tabs after creation
        send_item_url = URL(f="send", args=["[id]",
                                            "track_item"])
        self.configure(tablename,
                       onaccept = self.inv_send_onaccept,
                       create_next = send_item_url,
                       update_next = send_item_url)
        # =====================================================================
        # Received (In/Receive / Donation / etc)
        #
        inv_recv_type = { 0: NONE,
                          1: T("Other Warehouse"),
                          2: T("Donation"),
                          3: T("Supplier"),
                          }
        ship_doc_status = { SHIP_DOC_PENDING : T("Pending"),
                            SHIP_DOC_COMPLETE : T("Complete") }
        radio_widget = lambda field, value: \
                       RadioWidget().widget(field, value, cols = 2)
        tablename = "inv_recv"
        table = self.define_table("inv_recv",
                                  Field("tracking_no",
                                        "string",
                                        label = tn_label,
                                        writable = False
                                        ),
                                  person_id(name = "sender_id",
                                            label = T("Sent By Person"),
                                            ondelete = "SET NULL",
                                            comment = self.pr_person_comment(child="sender_id"),
                                            ),
                                  Field("from_site_id",
                                        "reference org_site",
                                        label = T("From Facility"),
                                        ondelete = "SET NULL",
                                        represent = org_site_represent
                                        ),
                                  Field("eta", "date",
                                        label = T("Date Expected"),
                                        writable = False,
                                        requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
                                        represent = s3_date_represent,
                                        widget = S3DateWidget()
                                        ),
                                  person_id(name = "recipient_id",
                                            label = T("Received By"),
                                            ondelete = "SET NULL",
                                            default = auth.s3_logged_in_person(),
                                            comment = self.pr_person_comment(child="recipient_id")),
                                  Field("site_id",
                                        "reference org_site",
                                        label=T("By Facility"),
                                        ondelete = "SET NULL",
                                        default = auth.user.site_id if auth.is_logged_in() else None,
                                        readable = True,
                                        writable = True,
                                        widget = S3SiteAutocompleteWidget(),
                                        represent=org_site_represent),
                                  Field("date", "date",
                                        label = T("Date Received"),
                                        requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
                                        represent = s3_date_represent,
                                        widget = S3DateWidget(),
                                        comment = DIV(_class="tooltip",
                                                      _title="%s|%s" % (T("Date Received"),
                                                                        T("Will be filled automatically when the Shipment has been Received"))
                                                      )
                                        ),
                                  Field("type",
                                        "integer",
                                        requires = IS_NULL_OR(IS_IN_SET(inv_recv_type)),
                                        represent = lambda opt: inv_recv_type.get(opt, UNKNOWN_OPT),
                                        label = T("Type"),
                                        default = 0,
                                        ),
                                  Field("status",
                                        "integer",
                                        requires = IS_NULL_OR(IS_IN_SET(shipment_status)),
                                        represent = lambda opt: shipment_status.get(opt, UNKNOWN_OPT),
                                        default = SHIP_STATUS_IN_PROCESS,
                                        label = T("Status"),
                                        writable = False,
                                        ),
                                  Field("grn_status",
                                        "integer",
                                        requires = IS_NULL_OR(IS_IN_SET(ship_doc_status)),
                                        represent = lambda opt: ship_doc_status.get(opt, UNKNOWN_OPT),
                                        default = SHIP_DOC_PENDING,
                                        widget = radio_widget,
                                        label = T("GRN Status"),
                                        comment = DIV( _class="tooltip",
                                                       _title="%s|%s" % (T("GRN Status"),
                                                                         T("Has the GRN (Goods Received Note) been completed?"))),
                                        ),
                                  Field("cert_status",
                                        "integer",
                                        requires = IS_NULL_OR(IS_IN_SET(ship_doc_status)),
                                        represent = lambda opt: ship_doc_status.get(opt, UNKNOWN_OPT),
                                        default = SHIP_DOC_PENDING,
                                        widget = radio_widget,
                                        label = T("Certificate Status"),
                                        comment = DIV( _class="tooltip",
                                                       _title="%s|%s" % (T("Certificate Status"),
                                                                         T("Has the Certificate for receipt of the shipment been given to the sender?"))),
                                        ),
                                  s3.comments(),
                                  *s3.meta_fields())
        # CRUD Strings
        inv_recv_crud_strings()
        if settings.get_inv_shipment_name() == "order":
            recv_id_label = T("Order")
        else:
            recv_id_label = T("Receive Shipment")
        # Reusable Field
        recv_id = S3ReusableField("recv_id", db.inv_recv, sortby="date",
                                  requires = IS_NULL_OR(IS_ONE_OF(db,
                                                                  "inv_recv.id",
                                                                  self.inv_recv_represent,
                                                                  orderby="inv_recv.date",
                                                                  sort=True)),
                                  represent = self.inv_recv_represent,
                                  label = recv_id_label,
                                  ondelete = "RESTRICT")
        # Search Method
        if settings.get_inv_shipment_name() == "order":
            recv_search_comment = T("Search for an order by looking for text in any field.")
            recv_search_date_field = "eta"
            recv_search_date_comment = T("Search for an order expected between these dates")
        else:
            recv_search_comment = T("Search for a shipment by looking for text in any field.")
            recv_search_date_field = "date"
            recv_search_date_comment = T("Search for a shipment received between these dates")
        recv_search = S3Search(
            simple=(S3SearchSimpleWidget(
                        name="recv_search_text_simple",
                        label=T("Search"),
                        comment=recv_search_comment,
                        field=[ "from_person",
                                "comments",
                                "from_site_id$name",
                                "recipient_id$first_name",
                                "recipient_id$middle_name",
                                "recipient_id$last_name",
                                "site_id$name"
                                ]
                      )),
            advanced=(S3SearchSimpleWidget(
                        name="recv_search_text_advanced",
                        label=T("Search"),
                        comment=recv_search_comment,
                        field=[ "from_person",
                                "comments",
                                "from_site_id$name",
                                "recipient_id$first_name",
                                "recipient_id$middle_name",
                                "recipient_id$last_name",
                                "site_id$name"
                                ]
                      ),
                      S3SearchMinMaxWidget(
                        name="recv_search_date",
                        method="range",
                        label=table[recv_search_date_field].label,
                        comment=recv_search_date_comment,
                        field=[recv_search_date_field]
                      ),
                      S3SearchOptionsWidget(
                        name="recv_search_site",
                        label=T("Facility"),
                        field=["site_id"],
                        represent ="%(name)s",
                        cols = 2
                      ),
                      S3SearchOptionsWidget(
                        name="recv_search_status",
                        label=T("Status"),
                        field=["status"],
                        cols = 2
                      ),
                      S3SearchOptionsWidget(
                        name="recv_search_grn",
                        label=T("GRN Status"),
                        field=["grn_status"],
                        cols = 2
                      ),
                      S3SearchOptionsWidget(
                        name="recv_search_cert",
                        label=T("Certificate Status"),
                        field=["grn_status"],
                        cols = 2
                      ),
            ))
        # Redirect to the Items tabs after creation
        recv_item_url = URL(f="recv", args=["[id]",
                                            "track_item"])
        # it shouldn't be possible for the user to delete a send item
        self.configure("inv_recv",
                       deletable=False,
                       )
        self.configure(tablename,
                       search_method = recv_search,
                       create_next = recv_item_url,
                       update_next = recv_item_url)
        # Component
        self.add_component("inv_track_item",
                           inv_recv="recv_id")
        # Print Forms
        self.set_method(tablename,
                        method="form",
                        action=self.inv_recv_form)
        self.set_method(tablename,
                        method="cert",
                        action=self.inv_recv_donation_cert )
        # =====================================================================
        # Tracking Items
        #
        tracking_status = {0 : T("Unknown"),
                           1 : T("Preparing"),
                           2 : T("In transit"),
                           3 : T("Unloading"),
                           4 : T("Arrived"),
                           5 : T("Canceled"),
                           }
        # @todo add the optional adj_id
        tablename = "inv_track_item"
        table = self.define_table("inv_track_item",
                                  org_id(name = "track_org_id",
                                         label = T("Shipping Organization"),
                                         ondelete = "SET NULL",
                                         readable = False,
                                         writable = False),
                                  Field("item_source_no",
                                        "string",
                                        length = 16,
                                        label = itn_label,
                                        ),
                                  Field("status",
                                        "integer",
                                        required = True,
                                        requires = IS_IN_SET(tracking_status),
                                        default = 1,
                                        represent = lambda opt: tracking_status[opt],
                                        writable = False),
                                  inv_item_id(name="send_stock_id",
                                              ondelete = "RESTRICT",
                                              script = SCRIPT("""
$(document).ready(function() {
    S3FilterFieldChange({
        'FilterField': 'send_stock_id',
        'Field': 'item_pack_id',
        'FieldResource': 'item_pack',
        'FieldPrefix': 'supply',
        'url': S3.Ap.concat('/inv/inv_item_packs/'),
        'msgNoRecords': S3.i18n.no_packs,
        'fncPrep': fncPrepItem,
        'fncRepresent': fncRepresentItem
    });
});""") # need to redefine the script because of the change in the field name :/
                                              ), # original inventory
                                  item_id(ondelete = "RESTRICT"), # supply item
                                  item_pack_id(ondelete = "SET NULL"), # pack table
                                  Field("quantity",
                                        "double",
                                        label = T("Quantity Sent"),
                                        notnull = True),
                                  Field("recv_quantity",
                                        "double",
                                        label = T("Quantity Received"),
                                        represent = self.qnty_recv_repr,
                                        readable = False,
                                        writable = False,),
                                  currency_type("currency"),
                                  Field("pack_value",
                                        "double",
                                        label = T("Value per Pack")),
                                  Field("expiry_date", "date",
                                        label = T("Expiry Date"),
                                        #requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
                                        represent = s3_date_represent,
                                        widget = S3DateWidget()
                                        ),
                                  Field("bin", # The bin at origin
                                        "string",
                                        length = 16,
                                        ),
                                  send_id(), # send record
                                  recv_id(), # receive record
                                  inv_item_id(name="recv_stock_id",
                                              label = "Receiving Inventory",
                                              required = False,
                                              readable = False,
                                              writable = False,
                                              ondelete = "RESTRICT"), # received inventory
                                  Field("recv_bin", # The bin at destination
                                        "string",
                                        length = 16,
                                        readable = False,
                                        writable = False,
                                        widget = S3InvBinWidget("inv_track_item")
                                        ),
                                  org_id(name = "supply_org_id",
                                         label = "Supplying Organization",
                                         ondelete = "SET NULL"), # original donating org
                                  adj_item_id(ondelete = "RESTRICT"), # any adjustment record
                                  s3.comments(),
                                  req_item_id(readable = False,
                                              writable = False),
                                  *s3.meta_fields()
                                  )
        # pack_quantity virtual field
        table.virtualfields.append(item_pack_virtualfields(tablename=tablename))
        # CRUD strings
        ADD_SEND_ITEM = T("Add Item to Shipment")
        LIST_SEND_ITEMS = T("List Sent Items")
        s3.crud_strings[tablename] = Storage(
            title_create = ADD_SEND_ITEM,
            title_display = T("Sent Item Details"),
            title_list = LIST_SEND_ITEMS,
            title_update = T("Edit Sent Item"),
            title_search = T("Search Sent Items"),
            subtitle_create = T("Add New Sent Item"),
            subtitle_list = T("Shipment Items"),
            label_list_button = LIST_SEND_ITEMS,
            label_create_button = ADD_SEND_ITEM,
            label_delete_button = T("Delete Sent Item"),
            msg_record_created = T("Item Added to Shipment"),
            msg_record_modified = T("Sent Item updated"),
            msg_record_deleted = T("Sent Item deleted"),
            msg_list_empty = T("No Sent Items currently registered"))
        # Update owned_by_role to the send's owned_by_role
        self.configure(tablename,
                       onaccept = self.inv_track_item_onaccept,
                       onvalidation = self.inv_track_item_onvalidate,
                       )
        # ---------------------------------------------------------------------
        # Pass variables back to global scope (response.s3.*)
        #
        return Storage(inv_track_item_deleting = self.inv_track_item_deleting,
                       )
# ---------------------------------------------------------------------
@staticmethod
def inv_send_represent(id):
"""
"""
if id:
db = current.db
s3db = current.s3db
table = s3db.inv_send
send_row = db(table.id == id).select(table.date,
table.to_site_id,
limitby=(0, 1)).first()
return SPAN(table.to_site_id.represent(send_row.to_site_id),
" - ",
table.date.represent(send_row.date)
)
else:
return current.messages.NONE
@staticmethod
def inv_send_onaccept(form):
"""
When a inv send record is created then create the tracking number.
"""
s3db = current.s3db
db = current.db
stable = s3db.inv_send
oldTotal = 0
# If the tracking number is blank then set it up
if not form.record:
id = form.vars.id
db(stable.id == id).update(tracking_no = "TN-%07d" % (10000+id))
# ---------------------------------------------------------------------
@staticmethod
def inv_send_form (r, **attr):
"""
Generate a PDF of a Consignment Note
"""
s3db = current.s3db
table = s3db.inv_send
tracktable = s3db.inv_track_item
table.date.readable = True
record = table[r.id]
site_id = record.site_id
site = table.site_id.represent(site_id,False)
# hide the inv_item field
tracktable.send_stock_id.readable = False
tracktable.recv_stock_id.readable = False
exporter = S3PDF()
return exporter(r,
method="list",
componentname="inv_track_item",
formname="Waybill",
filename="Waybill-%s" % site,
report_hide_comments=True,
**attr
)
# ---------------------------------------------------------------------
@staticmethod
def inv_recv_represent(id):
"""
@ToDo: 'From Organisation' is great for Donations
(& Procurement if we make Suppliers Organisations), but isn't useful
for shipments between facilities within a single Org where
'From Facility' could be more appropriate
"""
if id:
db = current.db
s3db = current.s3db
table = s3db.inv_recv
inv_recv_row = db(table.id == id).select(table.date,
table.from_site_id,
limitby=(0, 1)).first()
return SPAN(table.from_site_id.represent(inv_recv_row.from_site_id),
" - ",
table.date.represent(inv_recv_row.date)
)
else:
return current.messages.NONE
# ---------------------------------------------------------------------
@staticmethod
def inv_recv_form (r, **attr):
"""
Generate a PDF of a GRN (Goods Received Note)
"""
T = current.T
s3db = current.s3db
table = s3db.inv_recv
table.date.readable = True
table.site_id.readable = True
table.site_id.label = T("By Warehouse")
table.site_id.represent = s3db.org_site_represent
record = table[r.id]
site_id = record.site_id
site = table.site_id.represent(site_id,False)
exporter = S3PDF()
return exporter(r,
method="list",
formname="Goods Received Note",
filename="GRN-%s" % site,
report_hide_comments=True,
componentname = "inv_track_item",
**attr
)
# -------------------------------------------------------------------------
@staticmethod
def inv_recv_donation_cert (r, **attr):
"""
Generate a PDF of a Donation certificate
"""
s3db = current.s3db
table = s3db.inv_recv
table.date.readable = True
table.type.readable = False
table.site_id.readable = True
table.site_id.label = T("By Warehouse")
table.site_id.represent = s3db.org_site_represent
record = table[r.id]
site_id = record.site_id
site = table.site_id.represent(site_id,False)
exporter = S3PDF()
return exporter(r,
method="list",
formname="Donation Certificate",
filename="DC-%s" % site,
report_hide_comments=True,
componentname = "inv_track_item",
**attr
)
# -------------------------------------------------------------------------
@staticmethod
def qnty_recv_repr(value):
if value:
return value
else:
return B(value)
# -------------------------------------------------------------------------
    @staticmethod
    def inv_track_item_onvalidate(form):
        """
            When a track item record is being created with a tracking number
            then the tracking number needs to be unique within the organisation.

            If the stock is coming out of a warehouse then the stock details
            need to be copied across (org, expiry etc).

            If the stock is being received then there might be a selected bin;
            ensure that the correct bin is selected and save those details.
        """
        s3db = current.s3db
        db = current.db
        ttable = s3db.inv_track_item  # NOTE(review): currently unused
        itable = s3db.inv_inv_item
        stable = s3db.org_site
        # save the organisation from where this tracking originates
        if form.vars.send_stock_id:
            query = (itable.id == form.vars.send_stock_id) & \
                    (itable.site_id == stable.id)
            record = db(query).select(stable.organisation_id,
                                      limitby=(0, 1)).first()
            # NOTE(review): assumes the join always finds a row - crashes if
            # send_stock_id does not reference a valid, sited inv item
            form.vars.track_org_id = record.organisation_id
        # copy the data from the donated stock
        if form.vars.send_stock_id:
            query = (itable.id == form.vars.send_stock_id)
            record = db(query).select(limitby=(0, 1)).first()
            form.vars.item_id = record.item_id
            form.vars.item_source_no = record.item_source_no
            form.vars.expiry_date = record.expiry_date
            form.vars.bin = record.bin
            form.vars.supply_org_id = record.supply_org_id
            # If no pack value was entered then copy the value from the inv item
            if not form.vars.pack_value:
                # @todo: move this into the javascript ajax call on the item selected
                form.vars.pack_value = record.pack_value
        # if we have no send id then copy the quantity sent directly into the received field
        if not form.vars.send_id:
            form.vars.recv_quantity = form.vars.quantity
        # If there is a receiving bin select the right one
        if form.vars.recv_bin:
            # presumably the bin widget returns a [default, selected] pair -
            # verify against S3InvBinWidget
            if isinstance(form.vars.recv_bin, list):
                if form.vars.recv_bin[1] != "":
                    form.vars.recv_bin = form.vars.recv_bin[1]
                else:
                    form.vars.recv_bin = form.vars.recv_bin[0]
        return
@staticmethod
def inv_track_item_onaccept(form):
"""
When a track item record is created and it is linked to an inv_item
then the inv_item quantity will be reduced.
"""
s3db = current.s3db
db = current.db
tracktable = s3db.inv_track_item
stocktable = s3db.inv_inv_item
stable = s3db.inv_send
rtable = s3db.inv_recv
oldTotal = 0
# only modify the original stock total if we have a quantity on the form
# Their'll not be one if it is being received since by then it is read only
if form.vars.quantity:
if form.record:
if form.record.send_stock_id != None:
oldTotal = form.record.quantity
db(stocktable.id == form.record.send_stock_id).update(quantity = stocktable.quantity + oldTotal)
newTotal = form.vars.quantity
db(stocktable.id == form.vars.send_stock_id).update(quantity = stocktable.quantity - newTotal)
if form.vars.send_id and form.vars.recv_id:
db(rtable.id == form.vars.recv_id).update(tracking_no = stable[form.vars.send_id].tracking_no)
# if the status is 3 unloading
# Move all the items into the site, update any request & make any adjustments
# Finally change the status to 4 arrived
id = form.vars.id
if tracktable[id].status == 3:
record = tracktable[id]
query = (stocktable.item_id == record.item_id) & \
(stocktable.item_pack_id == record.item_pack_id) & \
(stocktable.currency == record.currency) & \
(stocktable.pack_value == record.pack_value) & \
(stocktable.expiry_date == record.expiry_date) & \
(stocktable.bin == record.recv_bin) & \
(stocktable.supply_org_id == record.supply_org_id)
inv_item_row = db(query).select(stocktable.id,
limitby=(0, 1)).first()
if inv_item_row:
stock_id = inv_item_row.id
db(stocktable.id == stock_id).update(quantity = stocktable.quantity + record.recv_quantity)
else:
stock_id = stocktable.insert(site_id = rtable[record.recv_id].site_id,
item_id = record.item_id,
item_pack_id = record.item_pack_id,
currency = record.currency,
pack_value = record.pack_value,
expiry_date = record.expiry_date,
bin = record.recv_bin,
supply_org_id = record.supply_org_id,
quantity = record.recv_quantity,
item_source_no = record.item_source_no,
)
# if this is linked to a request then update the quantity fulfil
if record.req_item_id:
query = (ritable.id == track_item.req_item_id)
db(query).update(quantity_fulfil
= ritable.quantity_fulfil
+ record.quantity
)
db(tracktable.id == id).update(recv_stock_id = stock_id,
status = 4)
# If the receive quantity doesn't equal the sent quantity
# then an adjustment needs to be set up
if record.quantity != record.recv_quantity:
# De we have an adjustment record?
query = (tracktable.recv_id == recv_id) & \
(tracktable.adj_id != None)
record = db(query).select(tracktable.adj_id,
limitby = (0, 1)).first()
if record:
adj_id = record.adj_id
# If we don't yet have an adj record then create it
else:
adjtable = s3db.inv_adj
recv_rec = s3db.inv_recv[record.recv_id]
adj_id = adjtable.insert(adjuster_id = recv_rec.recipient_id,
site_id = recv_rec.site_id,
adjustment_date = current.request.now.date(),
category = 0,
status = 1,
comments = recv_rec.comments,
)
# Now create the adj item record
adjitemtable = s3db.inv_adj_item
adj_item_id = adjitemtable.insert(reason = 0,
adj_id = adj_id,
inv_item_id = record.send_stock_id, # original source inv_item
item_id = record.item_id, # the supply item
item_pack_id = record.item_pack_id,
old_quantity = record.quantity,
new_quantity = record.recv_quantity,
currency = record.currency,
pack_value = record.pack_value,
expiry_date = record.expiry_date,
bin = record.recv_bin,
comments = record.comments,
)
# copy the adj_item_id to the tracking record
db(tracktable.id == id).update(adj_item_id = adj_item_id)
@staticmethod
def inv_track_item_deleting(id):
"""
A track item can only be deleted if the status is Preparing
When a track item record is deleted and it is linked to an inv_item
then the inv_item quantity will be reduced.
"""
s3db = current.s3db
db = current.db
tracktable = s3db.inv_track_item
stocktable = s3db.inv_inv_item
ritable = s3db.req_req_item
record = tracktable[id]
if record.status != 1:
return False
# if this is linked to a request
# then remove these items from the quantity in transit
if record.req_item_id:
db(ritable.id == record.req_item_id).update(quantity_transit = ritable.quantity_transit - record.quantity)
# Check that we have a link to a warehouse
if record.send_stock_id:
trackTotal = record.quantity
# Remove the total from this record and place it back in the warehouse
db(stocktable.id == record.send_stock_id).update(quantity = stocktable.quantity + trackTotal)
db(tracktable.id == id).update(quantity = 0,
comments = "%sQuantity was: %s" % (stocktable.comments, trackTotal))
return True
# =============================================================================
def inv_tabs(r):
    """
        Add an expandable set of Tabs for a Site's Inventory Tasks

        @ToDo: Make these Expand/Contract without a server-side call

        @param r: the S3Request for the site
        @returns: a list of (label, component[, vars]) tab tuples, or []
                  if the inv module is disabled or not readable
    """
    T = current.T
    s3db = current.s3db
    auth = current.auth
    session = current.session
    settings = current.deployment_settings
    if settings.has_module("inv") and \
       auth.s3_has_permission("read", "inv_inv_item"):
        collapse_tabs = settings.get_inv_collapse_tabs()
        if collapse_tabs and not \
            (r.tablename == "org_office" and r.record.type == 5): # 5 = Warehouse
            # Test if the tabs are collapsed
            show_collapse = True
            show_inv = r.get_vars.show_inv
            if show_inv == "True":
                show_inv = True
            elif show_inv == "False":
                show_inv = False
            else:
                show_inv = None
            if show_inv == True or show_inv == False:
                # Remember the user's explicit choice for this record
                session.s3.show_inv["%s_%s" % (r.name, r.id)] = show_inv
            else:
                # Fall back to the last remembered state
                show_inv = session.s3.show_inv.get("%s_%s" % (r.name, r.id))
        else:
            show_inv = True
            show_collapse = False
        if show_inv:
            if settings.get_inv_shipment_name() == "order":
                recv_tab = T("Orders")
            else:
                recv_tab = T("Receive")
            inv_tabs = [(T("Warehouse Stock"), "inv_item"),
                        #(T("Incoming"), "incoming/"),
                        (recv_tab, "recv"),
                        (T("Send"), "send", dict(select="sent")),
                        ]
            if settings.has_module("proc"):
                inv_tabs.append((T("Planned Procurements"), "plan"))
            if show_collapse:
                # Offer a tab to collapse the inventory tabs again
                inv_tabs.append(("- %s" % T("Warehouse"),
                                 None, dict(show_inv="False")))
        else:
            # Collapsed: a single tab to expand the inventory tabs
            inv_tabs = [("+ %s" % T("Warehouse"), "inv_item",
                        dict(show_inv="True"))]
        return inv_tabs
    else:
        return []
# =============================================================================
def inv_warehouse_rheader(r):
    """
        Resource Header for warehouse stock

        @param r: the S3Request
        @returns: the rheader DIV, or None for non-interactive views
    """
    if r.representation != "html" or r.method == "import":
        # RHeaders only used in interactive views
        return None
    T = current.T  # was missing: T is not otherwise defined in this scope
    s3 = current.response.s3
    tablename, record = s3_rheader_resource(r)
    rheader = None
    if tablename == "org_organisation" or tablename == "org_office":
        rheader = s3.org_rheader(r)
    # Action buttons are placed in the resource footer
    rfooter = TAG[""]()
    if record and "id" in record:
        if r.component and r.component.name == "inv_item":
            as_btn = A( T("Adjust Stock"),
                        _href = URL(c = "inv",
                                    f = "adj",
                                    args = [record.id, "create"]
                                    ),
                        _class = "action-btn"
                        )
            rfooter.append(as_btn)
        else:
            ns_btn = A( T("Receive New Stock"),
                        _href = URL(c = "inv",
                                    f = "recv",
                                    args = ["create"]
                                    ),
                        _class = "action-btn"
                        )
            rfooter.append(ns_btn)
    s3.rfooter = rfooter
    return rheader
# =============================================================================
def inv_recv_crud_strings():
    """
        CRUD Strings for inv_recv which need to be visible to menus
        without a model load
    """
    T = current.T  # was missing: T is not otherwise defined in this scope
    if current.deployment_settings.get_inv_shipment_name() == "order":
        # "Order" terminology
        ADD_RECV = T("Add Order")
        LIST_RECV = T("List Orders")
        current.response.s3.crud_strings["inv_recv"] = Storage(
            title_create = ADD_RECV,
            title_display = T("Order Details"),
            title_list = LIST_RECV,
            title_update = T("Edit Order"),
            title_search = T("Search Orders"),
            subtitle_create = ADD_RECV,
            subtitle_list = T("Orders"),
            label_list_button = LIST_RECV,
            label_create_button = ADD_RECV,
            label_delete_button = T("Delete Order"),
            msg_record_created = T("Order Created"),
            msg_record_modified = T("Order updated"),
            msg_record_deleted = T("Order canceled"),
            msg_list_empty = T("No Orders registered")
        )
    else:
        # "Shipment" terminology
        ADD_RECV = T("Receive Shipment")
        LIST_RECV = T("List Received Shipments")
        current.response.s3.crud_strings["inv_recv"] = Storage(
            title_create = ADD_RECV,
            title_display = T("Received Shipment Details"),
            title_list = LIST_RECV,
            title_update = T("Edit Received Shipment"),
            title_search = T("Search Received Shipments"),
            subtitle_create = ADD_RECV,
            subtitle_list = T("Received Shipments"),
            label_list_button = LIST_RECV,
            label_create_button = ADD_RECV,
            label_delete_button = T("Delete Received Shipment"),
            msg_record_created = T("Shipment Created"),
            msg_record_modified = T("Received Shipment updated"),
            msg_record_deleted = T("Received Shipment canceled"),
            msg_list_empty = T("No Received Shipments")
        )
    return
# =============================================================================
def inv_send_rheader(r):
    """
        Resource Header for Send (outgoing shipments)

        @param r: the S3Request
        @returns: the rheader DIV, or None for non-send/non-HTML requests
    """
    if r.representation == "html" and r.name == "send":
        record = r.record
        if record:
            T = current.T  # define explicitly: not guaranteed in module scope
            s3db = current.s3db
            auth = current.auth
            s3 = current.response.s3
            tabs = [(T("Edit Details"), None),
                    (T("Items"), "track_item"),
                    ]
            rheader_tabs = s3_rheader_tabs(r, tabs)
            table = r.table
            rData = TABLE(
                       TR( TH("%s: " % table.date.label),
                           table.date.represent(record.date),
                           TH("%s: " % table.delivery_date.label),
                           table.delivery_date.represent(record.delivery_date),
                           ),
                       TR( TH("%s: " % table.site_id.label),
                           table.site_id.represent(record.site_id),
                           TH("%s: " % table.to_site_id.label),
                           table.to_site_id.represent(record.to_site_id),
                           ),
                       TR( TH("%s: " % table.status.label),
                           table.status.represent(record.status),
                           TH("%s: " % table.comments.label),
                           TD(record.comments or "", _colspan=3)
                           )
                       )
            rSubdata = TABLE()
            rfooter = TAG[""]()
            if record.status == SHIP_STATUS_IN_PROCESS:
                if auth.s3_has_permission("update",
                                          "inv_send",
                                          record_id=record.id):
                    tracktable = current.s3db.inv_track_item
                    query = (tracktable.send_id == record.id) & \
                            (tracktable.send_stock_id == None) & \
                            (tracktable.deleted == False)
                    row = current.db(query).select(tracktable.id,
                                                   limitby=(0, 1)).first()
                    if row == None:
                        # No incomplete items: offer to send the shipment
                        send_btn = A( T("Send Shipment"),
                                      _href = URL(c = "inv",
                                                  f = "send_process",
                                                  args = [record.id]
                                                  ),
                                      _id = "send_process",
                                      _class = "action-btn"
                                      )
                        send_btn_confirm = SCRIPT("S3ConfirmClick('#send_process', '%s')"
                                                  % T("Do you want to send this shipment?") )
                        rfooter.append(send_btn)
                        rfooter.append(send_btn_confirm)
                    ritable = current.s3db.req_req_item
                    rcitable = current.s3db.req_commit_item
                    query = (tracktable.send_id == record.id) & \
                            (rcitable.req_item_id == tracktable.req_item_id) & \
                            (tracktable.req_item_id == ritable.id) & \
                            (tracktable.deleted == False)
                    records = current.db(query).select()
                    # fixed: this loop previously rebound `record`, clobbering
                    # the send record used by the status checks below
                    for req_row in records:
                        rSubdata.append(TR( TH("%s: " % ritable.item_id.label),
                                            ritable.item_id.represent(req_row.req_req_item.item_id),
                                            TH("%s: " % rcitable.quantity.label),
                                            req_row.req_commit_item.quantity,
                                            ))
            else:
                # Shipment already dispatched: offer the printed Waybill
                cn_btn = A( T("Waybill"),
                            _href = URL(f = "send",
                                        args = [record.id, "form"]
                                        ),
                            _class = "action-btn"
                            )
                rfooter.append(cn_btn)
            if record.status != SHIP_STATUS_CANCEL:
                if record.status == SHIP_STATUS_SENT:
                    vars = current.request.vars
                    if "site_id" in vars and \
                       auth.s3_has_permission("update",
                                              "org_site",
                                              record_id=vars.site_id):
                        receive_btn = A( T("Process Received Shipment"),
                                         _href = URL(c = "inv",
                                                     f = "recv_sent",
                                                     args = [record.id],
                                                     vars = vars
                                                     ),
                                         _id = "send_receive",
                                         _class = "action-btn",
                                         _title = T("Receive this shipment")
                                         )
                        #receive_btn_confirm = SCRIPT("S3ConfirmClick('#send_receive', '%s')"
                        #                             % T("Receive this shipment?") )
                        rfooter.append(receive_btn)
                        #rheader.append(receive_btn_confirm)
                    if auth.s3_has_permission("update",
                                              "inv_send",
                                              record_id=record.id):
                        if "received" in vars:
                            # Out-of-band confirmation: flag as received
                            s3db.inv_send[record.id] = \
                                dict(status = SHIP_STATUS_RECEIVED)
                        else:
                            receive_btn = A( T("Confirm Shipment Received"),
                                             _href = URL(f = "send",
                                                         args = [record.id],
                                                         vars = dict(received = True),
                                                         ),
                                             _id = "send_receive",
                                             _class = "action-btn",
                                             _title = T("Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system")
                                             )
                            receive_btn_confirm = SCRIPT("S3ConfirmClick('#send_receive', '%s')"
                                                         % T("Confirm that the shipment has been received by a destination which will not record the shipment directly into the system and confirmed as received.") )
                            rfooter.append(receive_btn)
                            rfooter.append(receive_btn_confirm)
                    if auth.s3_has_permission("delete",
                                              "inv_send",
                                              record_id=record.id):
                        cancel_btn = A( T("Cancel Shipment"),
                                        _href = URL(c = "inv",
                                                    f = "send_cancel",
                                                    args = [record.id]
                                                    ),
                                        _id = "send_cancel",
                                        _class = "action-btn"
                                        )
                        cancel_btn_confirm = SCRIPT("S3ConfirmClick('#send_cancel', '%s')"
                                                    % T("Do you want to cancel this sent shipment? The items will be returned to the Warehouse. This action CANNOT be undone!") )
                        rfooter.append(cancel_btn)
                        rfooter.append(cancel_btn_confirm)
            s3.rfooter = rfooter
            rheader = DIV(rData,
                          rheader_tabs,
                          rSubdata
                          )
            return rheader
    return None
# =============================================================================
def inv_recv_rheader(r):
    """ Resource Header for Receiving

        Builds the rheader (details table + tabs) for an inv_recv record
        and appends status-dependent action buttons to s3.rfooter.

        @param r: the S3Request
        @return: the rheader DIV, or None if not applicable
    """

    if r.representation == "html" and r.name == "recv":
        record = r.record
        if record:
            T = current.T
            s3 = current.response.s3
            auth = current.auth

            tabs = [(T("Edit Details"), None),
                    (T("Items"), "track_item"),
                    ]
            rheader_tabs = s3_rheader_tabs(r, tabs)

            table = r.table
            # Summary table of the shipment being received
            rheader = DIV( TABLE(
                               TR( TH( "%s: " % table.eta.label),
                                   table.eta.represent(record.eta),
                                   TH("%s: " % table.status.label),
                                   table.status.represent(record.status),
                                  ),
                               TR( TH( "%s: " % table.date.label),
                                   table.date.represent(record.date),
                                  ),
                               TR( TH( "%s: " % table.site_id.label),
                                   table.site_id.represent(record.site_id),
                                  ),
                               TR( TH( "%s: " % table.from_site_id.label),
                                   table.from_site_id.represent(record.from_site_id),
                                  ),
                               TR( TH( "%s: " % table.sender_id.label),
                                   s3_fullname(record.sender_id),
                                   TH( "%s: " % table.recipient_id.label),
                                   s3_fullname(record.recipient_id),
                                  ),
                               TR( TH( "%s: " % table.comments.label),
                                   TD(record.comments or "", _colspan=2),
                                  ),
                                ),
                            rheader_tabs
                           )

            rfooter = TAG[""]()
            if record.status == SHIP_STATUS_SENT or \
               record.status == SHIP_STATUS_IN_PROCESS:
                if auth.s3_has_permission("update",
                                          "inv_recv",
                                          record_id=record.id):
                    # Only offer "Receive Shipment" once every track item
                    # has a recv_quantity recorded
                    tracktable = current.s3db.inv_track_item
                    query = (tracktable.recv_id == record.id) & \
                            (tracktable.recv_quantity == None)
                    row = current.db(query).select(tracktable.id,
                                                   limitby=(0, 1)).first()
                    if row == None:
                        recv_btn = A( T("Receive Shipment"),
                                      _href = URL(c = "inv",
                                                  f = "recv_process",
                                                  args = [record.id]
                                                  ),
                                      _id = "recv_process",
                                      _class = "action-btn"
                                      )
                        recv_btn_confirm = SCRIPT("S3ConfirmClick('#recv_process', '%s')"
                                                  % T("Do you want to receive this shipment?") )
                        rfooter.append(recv_btn)
                        rfooter.append(recv_btn_confirm)
                    else:
                        msg = T("You need to check all item quantities and allocate to bins before you can receive the shipment")
                        rfooter.append(SPAN(msg))
            else:
                # Shipment already processed: offer printable documents
                grn_btn = A( T("Goods Received Note"),
                             _href = URL(f = "recv",
                                         args = [record.id, "form"]
                                         ),
                             _class = "action-btn"
                             )
                rfooter.append(grn_btn)
                dc_btn = A( T("Donation Certificate"),
                            _href = URL(f = "recv",
                                        args = [record.id, "cert"]
                                        ),
                            _class = "action-btn"
                            )
                rfooter.append(dc_btn)

                if record.status == SHIP_STATUS_RECEIVED:
                    if current.auth.s3_has_permission("delete",
                                                      "inv_recv",
                                                      record_id=record.id):
                        cancel_btn = A( T("Cancel Shipment"),
                                        _href = URL(c = "inv",
                                                    f = "recv_cancel",
                                                    args = [record.id]
                                                    ),
                                        _id = "recv_cancel",
                                        _class = "action-btn"
                                        )
                        cancel_btn_confirm = SCRIPT("S3ConfirmClick('#recv_cancel', '%s')"
                                                    % T("Do you want to cancel this received shipment? The items will be removed from the Warehouse. This action CANNOT be undone!") )
                        rfooter.append(cancel_btn)
                        rfooter.append(cancel_btn_confirm)

            s3.rfooter = rfooter
            return rheader
    return None
# =============================================================================
class S3AdjustModel(S3Model):
    """
        A module to manage stock adjustments
        - adjustments made during a full inventory audit, or
        - adjustments made against a shipment
        with an audit trail of the corrections (inv_adj_item records)
    """

    names = ["inv_adj",
             "adj_id",
             "inv_adj_item",
             "adj_item_id",
             ]

    def model(self):

        T = current.T
        db = current.db
        auth = current.auth
        s3 = current.response.s3
        settings = current.deployment_settings

        person_id = self.pr_person_id
        org_id = self.org_organisation_id
        item_id = self.supply_item_id
        inv_item_id = self.inv_item_id
        item_pack_id = self.supply_item_pack_id
        currency_type = s3.currency_type
        req_item_id = self.req_item_id
        org_site_represent = self.org_site_represent

        messages = current.messages
        NONE = messages.NONE
        UNKNOWN_OPT = messages.UNKNOWN_OPT

        s3_date_format = settings.get_L10n_date_format()
        s3_date_represent = lambda dt: S3DateTime.date_represent(dt, utc=True)

        # =====================================================================
        # Inventory Adjustments
        #
        adjust_type = {0 : T("Shipment"),
                       1 : T("Inventory"),
                       }
        adjust_status = {0 : T("In Process"),
                         1 : T("Complete"),
                         }

        tablename = "inv_adj"
        table = self.define_table("inv_adj",
                                  person_id(name = "adjuster_id",
                                            label = T("Actioning officer"),
                                            ondelete = "RESTRICT",
                                            default = auth.s3_logged_in_person(),
                                            comment = self.pr_person_comment(child="adjuster_id")),
                                  self.super_link("site_id",
                                                  "org_site",
                                                  ondelete = "SET NULL",
                                                  label = T("Warehouse"),
                                                  default = auth.user.site_id if auth.is_logged_in() else None,
                                                  readable = True,
                                                  writable = True,
                                                  empty = False,
                                                  represent=org_site_represent),
                                  Field("adjustment_date",
                                        "date",
                                        label = T("Date of adjustment"),
                                        default = current.request.utcnow,
                                        writable = False,
                                        represent = s3_date_represent,
                                        widget = S3DateWidget()
                                        ),
                                  Field("status",
                                        "integer",
                                        requires = IS_NULL_OR(IS_IN_SET(adjust_status)),
                                        represent = lambda opt: adjust_status.get(opt, UNKNOWN_OPT),
                                        default = 0,
                                        label = T("Status of adjustment"),
                                        writable = False,
                                        ),
                                  Field("category",
                                        "integer",
                                        requires = IS_NULL_OR(IS_IN_SET(adjust_type)),
                                        represent = lambda opt: adjust_type.get(opt, UNKNOWN_OPT),
                                        default = 1,
                                        label = T("Type of adjustment"),
                                        writable = False,
                                        ),
                                  s3.comments(),
                                  *s3.meta_fields())

        # onaccept pre-populates adj_items for inventory-type adjustments
        self.configure("inv_adj",
                       onaccept = self.inv_adj_onaccept,
                       )

        # Reusable Field
        adj_id = S3ReusableField( "adj_id",
                                  db.inv_adj,
                                  sortby="date",
                                  requires = IS_NULL_OR(IS_ONE_OF(db,
                                                                  "inv_adj.id",
                                                                  self.inv_adj_represent,
                                                                  orderby="inv_adj.adjustment_date",
                                                                  sort=True)),
                                  represent = self.inv_adj_represent,
                                  label = T("Inventory Adjustment"),
                                  ondelete = "RESTRICT")

        adjust_reason = {0 : T("Unknown"),
                         1 : T("None"),
                         2 : T("Lost"),
                         3 : T("Damaged"),
                         4 : T("Expired"),
                         5 : T("Found"),
                         }

        # @todo add the optional adj_id
        tablename = "inv_adj_item"
        table = self.define_table("inv_adj_item",
                                  item_id(ondelete = "RESTRICT"), # supply item
                                  Field("reason",
                                        "integer",
                                        required = True,
                                        requires = IS_IN_SET(adjust_reason),
                                        default = 1,
                                        represent = lambda opt: adjust_reason[opt],
                                        writable = False),
                                  inv_item_id(ondelete = "RESTRICT",
                                              writable = False), # original inventory
                                  item_pack_id(ondelete = "SET NULL"), # pack table
                                  Field("old_quantity",
                                        "double",
                                        label = T("Original Quantity"),
                                        default = 0,
                                        notnull = True,
                                        writable = False),
                                  Field("new_quantity",
                                        "double",
                                        label = T("Revised Quantity"),
                                        represent = self.qnty_adj_repr,
                                        ),
                                  currency_type("currency"),
                                  Field("pack_value",
                                        "double",
                                        label = T("Value per Pack")),
                                  Field("expiry_date",
                                        "date",
                                        label = T("Expiry Date"),
                                        represent = s3_date_represent,
                                        widget = S3DateWidget()
                                        ),
                                  Field("bin",
                                        "string",
                                        length = 16,
                                        ),
                                  adj_id(),
                                  s3.comments(),
                                  *s3.meta_fields()
                                  )

        # Reusable Field
        adj_item_id = S3ReusableField( "adj_item_id",
                                       db.inv_adj_item,
                                       sortby="item_id",
                                       requires = IS_NULL_OR(IS_ONE_OF(db,
                                                                       "inv_adj_item.id",
                                                                       self.inv_adj_item_represent,
                                                                       orderby="inv_adj_item.item_id",
                                                                       sort=True)),
                                       represent = self.inv_adj_item_represent,
                                       label = T("Inventory Adjustment Item"),
                                       ondelete = "RESTRICT")

        # Component
        self.add_component("inv_adj_item",
                           inv_adj="adj_id")

        # Pass variables back to global scope (response.s3.*)
        return Storage(
                    adj_item_id = adj_item_id,
                    adj_id = adj_id,
                    )

    # -------------------------------------------------------------------------
    @staticmethod
    def qnty_adj_repr(value):
        """
            Represent the revised quantity; an empty/zero value is rendered
            in bold (B) to highlight that it still needs to be entered.
        """

        if value:
            return value
        else:
            return B(value)

    # ---------------------------------------------------------------------
    @staticmethod
    def inv_adj_onaccept(form):
        """
           When an adjustment record is created and it is of type inventory
           then an adj_item record for each inv_inv_item in the site will be
           created. If needed, extra adj_item records can be created later.
        """

        s3db = current.s3db
        db = current.db
        stocktable = s3db.inv_inv_item
        adjitemtable = s3db.inv_adj_item
        adjtable = s3db.inv_adj
        adj_rec = adjtable[form.vars.id]
        if adj_rec.category == 1:
            site_id = form.vars.site_id
            # Only get stock items with a positive quantity
            query = (stocktable.site_id == site_id) & \
                    (stocktable.quantity > 0) & \
                    (stocktable.deleted == False)
            inv_item_row = db(query).select()
            for inv_item in inv_item_row:
                # add an adjustment item record, copying the current stock
                # values so the delta can be computed against old_quantity
                adjitemtable.insert(reason = 0,
                                    adj_id = form.vars.id,
                                    inv_item_id = inv_item.id, # original source inv_item
                                    item_id = inv_item.item_id, # the supply item
                                    item_pack_id = inv_item.item_pack_id,
                                    old_quantity = inv_item.quantity,
                                    currency = inv_item.currency,
                                    pack_value = inv_item.pack_value,
                                    expiry_date = inv_item.expiry_date,
                                    bin = inv_item.bin,
                                    )

    # ---------------------------------------------------------------------
    @staticmethod
    def inv_adj_represent(id):
        """
            Represent an inv_adj record as "<adjuster> - <date>"
        """

        if id:
            db = current.db
            s3db = current.s3db
            table = s3db.inv_adj
            send_row = db(table.id == id).select(table.adjustment_date,
                                                 table.adjuster_id,
                                                 limitby=(0, 1)).first()
            return SPAN(table.adjuster_id.represent(send_row.adjuster_id),
                        " - ",
                        table.adjustment_date.represent(send_row.adjustment_date)
                        )
        else:
            return current.messages.NONE

    # ---------------------------------------------------------------------
    @staticmethod
    def inv_adj_item_represent(id):
        """
            Represent an inv_adj_item as "<item>: <delta> <pack>"
        """

        if id:
            db = current.db
            s3db = current.s3db
            table = s3db.inv_adj_item
            adj_row = db(table.id == id).select(table.item_id,
                                                table.old_quantity,
                                                table.new_quantity,
                                                table.item_pack_id,
                                                limitby=(0, 1)).first()
            return SPAN(table.item_id.represent(adj_row.item_id),
                        ": ",
                        (adj_row.new_quantity - adj_row.old_quantity),
                        " ",
                        table.item_pack_id.represent(adj_row.item_pack_id)
                        )
        else:
            return current.messages.NONE
def inv_adj_rheader(r):
    """ Resource Header for Inventory Adjustments

        Builds the rheader (details table + tabs) for an inv_adj record
        and appends a "Close Adjustment" button to s3.rfooter while the
        adjustment is still in process.

        @param r: the S3Request
        @return: the rheader DIV, or None if not applicable
    """

    if r.representation == "html" and r.name == "adj":
        record = r.record
        if record:
            # Fix: T was previously used without being bound in this function
            # (unlike the sibling rheaders, which all do T = current.T first)
            T = current.T
            auth = current.auth
            s3 = current.response.s3

            tabs = [(T("Edit Details"), None),
                    (T("Items"), "adj_item"),
                    ]
            rheader_tabs = s3_rheader_tabs(r, tabs)

            table = r.table
            rheader = DIV( TABLE(
                               TR( TH("%s: " % table.adjuster_id.label),
                                   table.adjuster_id.represent(record.adjuster_id),
                                   TH("%s: " % table.adjustment_date.label),
                                   table.adjustment_date.represent(record.adjustment_date),
                                  ),
                               TR( TH("%s: " % table.site_id.label),
                                   table.site_id.represent(record.site_id),
                                   TH("%s: " % table.category.label),
                                   table.category.represent(record.category),
                                  ),
                                ),
                            rheader_tabs
                           )

            rfooter = TAG[""]()
            if record.status == 0: # In process
                if auth.s3_has_permission("update",
                                          "inv_adj",
                                          record_id=record.id):
                    # Only offer "Close" once every adj_item has a revised
                    # quantity (DAL query: "== None" is intentional here)
                    aitable = current.s3db.inv_adj_item
                    query = (aitable.adj_id == record.id) & \
                            (aitable.new_quantity == None)
                    row = current.db(query).select(aitable.id,
                                                   limitby=(0, 1)).first()
                    if row is None:
                        close_btn = A( T("Close Adjustment"),
                                       _href = URL(c = "inv",
                                                   f = "adj_close",
                                                   args = [record.id]
                                                   ),
                                       _id = "adj_close",
                                       _class = "action-btn"
                                       )
                        close_btn_confirm = SCRIPT("S3ConfirmClick('#adj_close', '%s')"
                                                   % T("Do you want to close this adjustment?") )
                        rfooter.append(close_btn)
                        rfooter.append(close_btn_confirm)
                    else:
                        msg = T("You need to check all the revised quantities before you can close this adjustment")
                        rfooter.append(SPAN(msg))

            s3.rfooter = rfooter
            return rheader
    return None
# Generic function called by the duplicator methods to determine if the
# record already exists on the database.
def duplicator(job, query):
    """
        Generic duplicate detector used by the deduplication callbacks.

        Looks up `query` against the job's table; if a matching record
        exists, the import job is switched to an update of that record.

        @param job: an S3ImportJob describing the record being imported
        @param query: a DAL query identifying a potential duplicate
        @return: the duplicate's id if one was found, False if not,
                 None if the job already carries an id
    """

    # Nothing to do when the record id is already known
    if job.id:
        return

    table = job.table
    existing = current.db(query).select(table.id, limitby=(0, 1)).first()
    if not existing:
        return False

    # Duplicate found: turn the import into an update of that record
    job.id = existing.id
    job.data.id = existing.id
    job.method = job.METHOD.UPDATE
    return existing.id
# =============================================================================
class InvItemVirtualFields:
    """ Virtual fields as dimension classes for inventory reports """

    extra_fields = ["pack_value",
                    "quantity"
                    ]

    def total_value(self):
        """ Total value of the stock record (quantity * value per pack) """
        # Fix: docstring was a copy-paste error ("Year/Month of the start
        # date of the training event"); bare except narrowed to Exception
        # so SystemExit/KeyboardInterrupt are no longer swallowed
        try:
            return self.inv_inv_item.quantity * self.inv_inv_item.pack_value
        except Exception:
            # not available
            return current.messages.NONE

    def item_code(self):
        """ Code of the supply item of this stock record """
        try:
            return self.inv_inv_item.item_id.code
        except Exception:
            # not available
            return current.messages.NONE

    def item_category(self):
        """ Name of the item category of this stock record """
        try:
            return self.inv_inv_item.item_id.item_category_id.name
        except Exception:
            # not available
            return current.messages.NONE
# END =========================================================================
| {
"content_hash": "625120e39846fab6075f9dc15e4e34cb",
"timestamp": "",
"source": "github",
"line_count": 2088,
"max_line_length": 217,
"avg_line_length": 47.02873563218391,
"alnum_prop": 0.38543321520224855,
"repo_name": "flavour/ssf",
"id": "933d82aaf1fc51e0e97370cd329621720a5f7c76",
"size": "98221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/eden/inv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "9763120"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "21558751"
},
{
"name": "Shell",
"bytes": "1171"
}
],
"symlink_target": ""
} |
from .test_category import *
from .test_manufacturer import *
from .test_product import *
| {
"content_hash": "ba3c6194b0c5012674e1d187dc2e7313",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 30,
"alnum_prop": 0.7666666666666667,
"repo_name": "samitnuk/online_shop",
"id": "7f2a7ad4ac0e63e30cb00a575634026412be7c99",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/shop/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28580"
},
{
"name": "HTML",
"bytes": "32446"
},
{
"name": "JavaScript",
"bytes": "1111"
},
{
"name": "Python",
"bytes": "104680"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from . import views
# URL namespace, e.g. reverse("pages:page", kwargs={"slug": ...})
app_name = "pages"

urlpatterns = [
    # Any single path segment (no slashes) is treated as a page slug
    url(r'^(?P<slug>[^/]+)$', views.page, name="page"),
]
| {
"content_hash": "3e7abc6991b0de18d3e50831252beab8",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 19.625,
"alnum_prop": 0.6242038216560509,
"repo_name": "nnscr/nnscr.de",
"id": "c352c982a68c538568f3d362d450f4da62c96a4a",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pages/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7418"
},
{
"name": "HTML",
"bytes": "18999"
},
{
"name": "JavaScript",
"bytes": "3442"
},
{
"name": "Python",
"bytes": "20592"
}
],
"symlink_target": ""
} |
# Debug switch: validate the stack after every opcode (disabled by default;
# checking on each step is expensive — see the commented block at the end of
# the interpreter loop).
verify_stack_after_op = False

# ######################################
import copy
from ethereum import utils
from ethereum import opcodes
from ethereum.slogging import get_logger
from rlp.utils import encode_hex, ascii_chr
from ethereum.utils import to_string
# Per-feature loggers: trace output is enabled by activating the matching
# 'eth.vm.*' logging namespace.
log_log = get_logger('eth.vm.log')
log_vm_exit = get_logger('eth.vm.exit')
log_vm_op = get_logger('eth.vm.op')
log_vm_op_stack = get_logger('eth.vm.op.stack')
log_vm_op_memory = get_logger('eth.vm.op.memory')
log_vm_op_storage = get_logger('eth.vm.op.storage')

# Word-size constants for the EVM's 256-bit modular arithmetic.
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
TT255 = 2 ** 255
class CallData(object):
    """Read-only view over a slice of a parent memory list (the call input).

    Reads past the end of the underlying data are zero-padded, matching
    EVM calldata semantics.
    """

    def __init__(self, parent_memory, offset=0, size=None):
        self.data = parent_memory
        self.offset = offset
        self.size = len(self.data) if size is None else size
        # first index (in parent memory) past the end of this view
        self.rlimit = self.offset + self.size

    def extract_all(self):
        """Return the whole view as a bytes object, zero-padded to size."""
        window = self.data[self.offset: self.offset + self.size]
        window += [0] * (self.size - len(window))
        return b''.join(ascii_chr(byte) for byte in window)

    def extract32(self, i):
        """Return the 32-byte big-endian word starting at offset *i*."""
        if i >= self.size:
            return 0
        lo = self.offset + i
        word = self.data[lo: min(lo + 32, self.rlimit)]
        padding = [0] * (32 - len(word))
        return utils.bytearray_to_int(word + padding)

    def extract_copy(self, mem, memstart, datastart, size):
        """Copy *size* bytes from the view into *mem*, zero-filling any
        bytes that lie beyond the end of the data."""
        for k in range(size):
            src = datastart + k
            if src < self.size:
                mem[memstart + k] = self.data[self.offset + src]
            else:
                mem[memstart + k] = 0
class Message(object):
    """Container describing one VM call or create message."""

    def __init__(self, sender, to, value, gas, data, depth=0,
                 code_address=None, is_create=False, transfers_value=True):
        # addressing
        self.sender = sender
        self.to = to
        self.code_address = code_address
        # resources carried by the message
        self.value = value
        self.gas = gas
        self.data = data
        # call metadata
        self.depth = depth
        self.is_create = is_create
        self.transfers_value = transfers_value
        # logs emitted while processing this message
        self.logs = []

    def __repr__(self):
        shown = self.to[:8]
        return '<Message(to:{0}...)>'.format(shown)
class Compustate():
    """Mutable VM computation state: memory, stack, program counter, gas.

    Keyword arguments override the zero/empty defaults (e.g. gas=...).
    """

    def __init__(self, **kwargs):
        self.memory = []
        self.stack = []
        self.pc = 0
        self.gas = 0
        for name, value in kwargs.items():
            setattr(self, name, value)
# Preprocesses code, and determines which locations are in the middle
# of pushdata and thus invalid
# Preprocesses code, and determines which locations are in the middle
# of pushdata and thus invalid
def preprocess_code(code):
    """Decode raw bytecode into [name, in, out, fee, opcode, pushval] rows.

    Each pushdata byte also yields an INVALID placeholder row so that jump
    targets landing inside pushdata are rejected by the interpreter.
    """
    assert isinstance(code, bytes)
    raw = memoryview(code).tolist()
    n = len(raw)
    decoded = []
    pos = 0
    while pos < n:
        entry = copy.copy(
            opcodes.opcodes.get(raw[pos], ['INVALID', 0, 0, 0]) + [raw[pos], 0])
        decoded.append(entry)
        name = entry[0]
        if name.startswith('PUSH'):
            # accumulate the immediate operand into the row's last slot
            for _ in range(int(name[4:])):
                pos += 1
                datum = raw[pos] if pos < n else 0
                entry[-1] = (entry[-1] << 8) + datum
                if pos < n:
                    # mark the pushdata byte itself as a non-executable slot
                    decoded.append(['INVALID', 0, 0, 0, datum, 0])
        pos += 1
    return decoded
def mem_extend(mem, compustate, op, start, sz):
    """Grow memory to cover [start, start+sz), charging quadratic gas.

    Returns True on success (or when sz is 0). On insufficient gas the
    remaining gas is zeroed and False is returned.
    """
    if not sz:
        return True

    words_old = len(mem) // 32
    fee_old = words_old * opcodes.GMEMORY + \
        words_old ** 2 // opcodes.GQUADRATICMEMDENOM
    words_new = utils.ceil32(start + sz) // 32
    # if newsize > 524288:
    #     raise Exception("Memory above 16 MB per call not supported by this VM")
    fee_new = words_new * opcodes.GMEMORY + \
        words_new ** 2 // opcodes.GQUADRATICMEMDENOM

    if fee_old < fee_new:
        delta = fee_new - fee_old
        if compustate.gas < delta:
            compustate.gas = 0
            return False
        compustate.gas -= delta
        mem.extend([0] * ((words_new - words_old) * 32))
    return True
def data_copy(compustate, size):
    """Charge the per-word copy fee for copying *size* bytes.

    Returns True on success (or when size is 0); on insufficient gas the
    remaining gas is zeroed and False is returned.
    """
    if not size:
        return True
    fee = opcodes.GCOPY * utils.ceil32(size) // 32
    if compustate.gas < fee:
        compustate.gas = 0
        return False
    compustate.gas -= fee
    return True
def eat_gas(compustate, amount):
    """Deduct *amount* gas; zero the gas and return False when short."""
    if compustate.gas >= amount:
        compustate.gas -= amount
        return True
    compustate.gas = 0
    return False
def max_call_gas(gas):
    """Since EIP150 CALLs will send only all but 1/64th of the available gas.
    """
    reserved = gas // opcodes.CALL_CHILD_LIMIT_DENOM
    return gas - reserved
def vm_exception(error, **kargs):
    """Log a VM failure and return the (0, 0, []) failure triple."""
    log_vm_exit.trace('EXCEPTION', cause=error, **kargs)
    return 0, 0, []
def peaceful_exit(cause, gas, data, **kargs):
    """Log a normal VM exit and return the (1, remaining_gas, data) triple."""
    log_vm_exit.trace('EXIT', cause=cause, **kargs)
    return 1, gas, data
# Cache of preprocess_code() results keyed by raw bytecode, so repeated
# executions of the same contract skip re-decoding.
code_cache = {}
def vm_execute(ext, msg, code):
    """Run EVM bytecode *code* for message *msg* against external state *ext*.

    Returns (success, remaining_gas, output_data): success is 1 on a
    peaceful exit (STOP/RETURN/SUICIDE/end-of-code) and 0 on any VM
    exception (in which case all gas is consumed).
    """
    # precompute trace flag
    # if we trace vm, we're in slow mode anyway
    trace_vm = log_vm_op.is_active('trace')

    compustate = Compustate(gas=msg.gas)
    stk = compustate.stack
    mem = compustate.memory

    # decode the bytecode once per distinct code blob
    if code in code_cache:
        processed_code = code_cache[code]
    else:
        processed_code = preprocess_code(code)
        code_cache[code] = processed_code

    codelen = len(processed_code)

    op = None
    steps = 0
    _prevop = None  # for trace only
    while 1:
        # running off the end of the code is a normal exit
        if compustate.pc >= codelen:
            return peaceful_exit('CODE OUT OF RANGE', compustate.gas, [])

        op, in_args, out_args, fee, opcode, pushval = \
            processed_code[compustate.pc]

        # out of gas error
        if fee > compustate.gas:
            return vm_exception('OUT OF GAS')

        # empty stack error
        if in_args > len(compustate.stack):
            return vm_exception('INSUFFICIENT STACK',
                                op=op, needed=to_string(in_args),
                                available=to_string(len(compustate.stack)))

        # overfull stack error (1024-slot EVM limit)
        if len(compustate.stack) - in_args + out_args > 1024:
            return vm_exception('STACK SIZE LIMIT EXCEEDED',
                                op=op,
                                pre_height=to_string(len(compustate.stack)))

        # Apply operation
        compustate.gas -= fee
        compustate.pc += 1

        if trace_vm:
            """
            This diverges from normal logging, as we use the logging namespace
            only to decide which features get logged in 'eth.vm.op'
            i.e. tracing can not be activated by activating a sub
            like 'eth.vm.op.stack'
            """
            trace_data = {}
            trace_data['stack'] = list(map(to_string, list(compustate.stack)))
            if _prevop in ('MLOAD', 'MSTORE', 'MSTORE8', 'SHA3', 'CALL',
                           'CALLCODE', 'CREATE', 'CALLDATACOPY', 'CODECOPY',
                           'EXTCODECOPY'):
                if len(compustate.memory) < 1024:
                    trace_data['memory'] = \
                        b''.join([encode_hex(ascii_chr(x)) for x
                                  in compustate.memory])
                else:
                    trace_data['sha3memory'] = \
                        encode_hex(utils.sha3(''.join([ascii_chr(x) for
                                                       x in compustate.memory])))
            if _prevop in ('SSTORE', 'SLOAD') or steps == 0:
                trace_data['storage'] = ext.log_storage(msg.to)
            trace_data['gas'] = to_string(compustate.gas + fee)
            trace_data['inst'] = opcode
            trace_data['pc'] = to_string(compustate.pc - 1)
            if steps == 0:
                trace_data['depth'] = msg.depth
                trace_data['address'] = msg.to
            trace_data['op'] = op
            trace_data['steps'] = steps
            if op[:4] == 'PUSH':
                trace_data['pushvalue'] = pushval
            log_vm_op.trace('vm', **trace_data)
            steps += 1
            _prevop = op

        # Invalid operation (also covers jumps into pushdata)
        if op == 'INVALID':
            return vm_exception('INVALID OP', opcode=opcode)

        # Valid operations
        if opcode < 0x10:
            # 0x00-0x0f: stop and arithmetic
            if op == 'STOP':
                return peaceful_exit('STOP', compustate.gas, [])
            elif op == 'ADD':
                stk.append((stk.pop() + stk.pop()) & TT256M1)
            elif op == 'SUB':
                stk.append((stk.pop() - stk.pop()) & TT256M1)
            elif op == 'MUL':
                stk.append((stk.pop() * stk.pop()) & TT256M1)
            elif op == 'DIV':
                s0, s1 = stk.pop(), stk.pop()
                stk.append(0 if s1 == 0 else s0 // s1)
            elif op == 'MOD':
                s0, s1 = stk.pop(), stk.pop()
                stk.append(0 if s1 == 0 else s0 % s1)
            elif op == 'SDIV':
                s0, s1 = utils.to_signed(stk.pop()), utils.to_signed(stk.pop())
                stk.append(0 if s1 == 0 else (abs(s0) // abs(s1) *
                                              (-1 if s0 * s1 < 0 else 1)) & TT256M1)
            elif op == 'SMOD':
                s0, s1 = utils.to_signed(stk.pop()), utils.to_signed(stk.pop())
                stk.append(0 if s1 == 0 else (abs(s0) % abs(s1) *
                                              (-1 if s0 < 0 else 1)) & TT256M1)
            elif op == 'ADDMOD':
                s0, s1, s2 = stk.pop(), stk.pop(), stk.pop()
                stk.append((s0 + s1) % s2 if s2 else 0)
            elif op == 'MULMOD':
                s0, s1, s2 = stk.pop(), stk.pop(), stk.pop()
                stk.append((s0 * s1) % s2 if s2 else 0)
            elif op == 'EXP':
                base, exponent = stk.pop(), stk.pop()
                # fee for exponent is dependent on its bytes
                # calc n bytes to represent exponent
                nbytes = len(utils.encode_int(exponent))
                expfee = nbytes * opcodes.GEXPONENTBYTE
                if compustate.gas < expfee:
                    compustate.gas = 0
                    return vm_exception('OOG EXPONENT')
                compustate.gas -= expfee
                stk.append(pow(base, exponent, TT256))
            elif op == 'SIGNEXTEND':
                s0, s1 = stk.pop(), stk.pop()
                if s0 <= 31:
                    testbit = s0 * 8 + 7
                    if s1 & (1 << testbit):
                        stk.append(s1 | (TT256 - (1 << testbit)))
                    else:
                        stk.append(s1 & ((1 << testbit) - 1))
                else:
                    stk.append(s1)
        elif opcode < 0x20:
            # 0x10-0x1f: comparison and bitwise logic
            if op == 'LT':
                stk.append(1 if stk.pop() < stk.pop() else 0)
            elif op == 'GT':
                stk.append(1 if stk.pop() > stk.pop() else 0)
            elif op == 'SLT':
                s0, s1 = utils.to_signed(stk.pop()), utils.to_signed(stk.pop())
                stk.append(1 if s0 < s1 else 0)
            elif op == 'SGT':
                s0, s1 = utils.to_signed(stk.pop()), utils.to_signed(stk.pop())
                stk.append(1 if s0 > s1 else 0)
            elif op == 'EQ':
                stk.append(1 if stk.pop() == stk.pop() else 0)
            elif op == 'ISZERO':
                stk.append(0 if stk.pop() else 1)
            elif op == 'AND':
                stk.append(stk.pop() & stk.pop())
            elif op == 'OR':
                stk.append(stk.pop() | stk.pop())
            elif op == 'XOR':
                stk.append(stk.pop() ^ stk.pop())
            elif op == 'NOT':
                stk.append(TT256M1 - stk.pop())
            elif op == 'BYTE':
                s0, s1 = stk.pop(), stk.pop()
                if s0 >= 32:
                    stk.append(0)
                else:
                    stk.append((s1 // 256 ** (31 - s0)) % 256)
        elif opcode < 0x40:
            # 0x20-0x3f: SHA3 and environment information
            if op == 'SHA3':
                s0, s1 = stk.pop(), stk.pop()
                compustate.gas -= opcodes.GSHA3WORD * (utils.ceil32(s1) // 32)
                if compustate.gas < 0:
                    return vm_exception('OOG PAYING FOR SHA3')
                if not mem_extend(mem, compustate, op, s0, s1):
                    return vm_exception('OOG EXTENDING MEMORY')
                data = b''.join(map(ascii_chr, mem[s0: s0 + s1]))
                stk.append(utils.big_endian_to_int(utils.sha3(data)))
            elif op == 'ADDRESS':
                stk.append(utils.coerce_to_int(msg.to))
            elif op == 'BALANCE':
                # EIP150: Increase the gas cost of BALANCE to 400
                if ext.post_anti_dos_hardfork:
                    if not eat_gas(compustate, opcodes.BALANCE_SUPPLEMENTAL_GAS):
                        return vm_exception("OUT OF GAS")
                addr = utils.coerce_addr_to_hex(stk.pop() % 2 ** 160)
                stk.append(ext.get_balance(addr))
            elif op == 'ORIGIN':
                stk.append(utils.coerce_to_int(ext.tx_origin))
            elif op == 'CALLER':
                stk.append(utils.coerce_to_int(msg.sender))
            elif op == 'CALLVALUE':
                stk.append(msg.value)
            elif op == 'CALLDATALOAD':
                stk.append(msg.data.extract32(stk.pop()))
            elif op == 'CALLDATASIZE':
                stk.append(msg.data.size)
            elif op == 'CALLDATACOPY':
                mstart, dstart, size = stk.pop(), stk.pop(), stk.pop()
                if not mem_extend(mem, compustate, op, mstart, size):
                    return vm_exception('OOG EXTENDING MEMORY')
                if not data_copy(compustate, size):
                    return vm_exception('OOG COPY DATA')
                msg.data.extract_copy(mem, mstart, dstart, size)
            elif op == 'CODESIZE':
                stk.append(len(processed_code))
            elif op == 'CODECOPY':
                start, s1, size = stk.pop(), stk.pop(), stk.pop()
                if not mem_extend(mem, compustate, op, start, size):
                    return vm_exception('OOG EXTENDING MEMORY')
                if not data_copy(compustate, size):
                    return vm_exception('OOG COPY DATA')
                for i in range(size):
                    if s1 + i < len(processed_code):
                        mem[start + i] = processed_code[s1 + i][4]
                    else:
                        mem[start + i] = 0
            elif op == 'GASPRICE':
                stk.append(ext.tx_gasprice)
            elif op == 'EXTCODESIZE':
                # EIP150: Increase the gas cost of EXTCODESIZE to 700
                if ext.post_anti_dos_hardfork:
                    if not eat_gas(compustate, opcodes.EXTCODELOAD_SUPPLEMENTAL_GAS):
                        return vm_exception("OUT OF GAS")
                addr = utils.coerce_addr_to_hex(stk.pop() % 2 ** 160)
                stk.append(len(ext.get_code(addr) or b''))
            elif op == 'EXTCODECOPY':
                # EIP150: Increase the base gas cost of EXTCODECOPY to 700
                if ext.post_anti_dos_hardfork:
                    if not eat_gas(compustate, opcodes.EXTCODELOAD_SUPPLEMENTAL_GAS):
                        return vm_exception("OUT OF GAS")
                addr = utils.coerce_addr_to_hex(stk.pop() % 2 ** 160)
                start, s2, size = stk.pop(), stk.pop(), stk.pop()
                extcode = ext.get_code(addr) or b''
                assert utils.is_string(extcode)
                if not mem_extend(mem, compustate, op, start, size):
                    return vm_exception('OOG EXTENDING MEMORY')
                if not data_copy(compustate, size):
                    return vm_exception('OOG COPY DATA')
                for i in range(size):
                    if s2 + i < len(extcode):
                        mem[start + i] = utils.safe_ord(extcode[s2 + i])
                    else:
                        mem[start + i] = 0
        elif opcode < 0x50:
            # 0x40-0x4f: block information
            if op == 'BLOCKHASH':
                stk.append(utils.big_endian_to_int(ext.block_hash(stk.pop())))
            elif op == 'COINBASE':
                stk.append(utils.big_endian_to_int(ext.block_coinbase))
            elif op == 'TIMESTAMP':
                stk.append(ext.block_timestamp)
            elif op == 'NUMBER':
                stk.append(ext.block_number)
            elif op == 'DIFFICULTY':
                stk.append(ext.block_difficulty)
            elif op == 'GASLIMIT':
                stk.append(ext.block_gas_limit)
        elif opcode < 0x60:
            # 0x50-0x5f: stack, memory, storage and flow operations
            if op == 'POP':
                stk.pop()
            elif op == 'MLOAD':
                s0 = stk.pop()
                if not mem_extend(mem, compustate, op, s0, 32):
                    return vm_exception('OOG EXTENDING MEMORY')
                data = b''.join(map(ascii_chr, mem[s0: s0 + 32]))
                stk.append(utils.big_endian_to_int(data))
            elif op == 'MSTORE':
                s0, s1 = stk.pop(), stk.pop()
                if not mem_extend(mem, compustate, op, s0, 32):
                    return vm_exception('OOG EXTENDING MEMORY')
                v = s1
                # write the 32-byte word big-endian, least byte last
                for i in range(31, -1, -1):
                    mem[s0 + i] = v % 256
                    v //= 256
            elif op == 'MSTORE8':
                s0, s1 = stk.pop(), stk.pop()
                if not mem_extend(mem, compustate, op, s0, 1):
                    return vm_exception('OOG EXTENDING MEMORY')
                mem[s0] = s1 % 256
            elif op == 'SLOAD':
                # EIP150: Increase the gas cost of SLOAD to 200
                if ext.post_anti_dos_hardfork:
                    if not eat_gas(compustate, opcodes.SLOAD_SUPPLEMENTAL_GAS):
                        return vm_exception("OUT OF GAS")
                stk.append(ext.get_storage_data(msg.to, stk.pop()))
            elif op == 'SSTORE':
                s0, s1 = stk.pop(), stk.pop()
                if ext.get_storage_data(msg.to, s0):
                    gascost = opcodes.GSTORAGEMOD if s1 else opcodes.GSTORAGEKILL
                    refund = 0 if s1 else opcodes.GSTORAGEREFUND
                else:
                    gascost = opcodes.GSTORAGEADD if s1 else opcodes.GSTORAGEMOD
                    refund = 0
                if compustate.gas < gascost:
                    return vm_exception('OUT OF GAS')
                compustate.gas -= gascost
                ext.add_refund(refund)  # adds neg gascost as a refund if below zero
                ext.set_storage_data(msg.to, s0, s1)
            elif op == 'JUMP':
                compustate.pc = stk.pop()
                opnew = processed_code[compustate.pc][0] if \
                    compustate.pc < len(processed_code) else 'STOP'
                if opnew != 'JUMPDEST':
                    return vm_exception('BAD JUMPDEST')
            elif op == 'JUMPI':
                s0, s1 = stk.pop(), stk.pop()
                if s1:
                    compustate.pc = s0
                    opnew = processed_code[compustate.pc][0] if \
                        compustate.pc < len(processed_code) else 'STOP'
                    if opnew != 'JUMPDEST':
                        return vm_exception('BAD JUMPDEST')
            elif op == 'PC':
                stk.append(compustate.pc - 1)
            elif op == 'MSIZE':
                stk.append(len(mem))
            elif op == 'GAS':
                stk.append(compustate.gas)  # AFTER subtracting cost 1
        elif op[:4] == 'PUSH':
            # operand was pre-decoded into pushval; skip over the pushdata
            pushnum = int(op[4:])
            compustate.pc += pushnum
            stk.append(pushval)
        elif op[:3] == 'DUP':
            depth = int(op[3:])
            stk.append(stk[-depth])
        elif op[:4] == 'SWAP':
            depth = int(op[4:])
            temp = stk[-depth - 1]
            stk[-depth - 1] = stk[-1]
            stk[-1] = temp
        elif op[:3] == 'LOG':
            """
            0xa0 ... 0xa4, 32/64/96/128/160 + len(data) gas
            a. Opcodes LOG0...LOG4 are added, takes 2-6 stack arguments
                    MEMSTART MEMSZ (TOPIC1) (TOPIC2) (TOPIC3) (TOPIC4)
            b. Logs are kept track of during tx execution exactly the same way as suicides
               (except as an ordered list, not a set).
               Each log is in the form [address, [topic1, ... ], data] where:
               * address is what the ADDRESS opcode would output
               * data is mem[MEMSTART: MEMSTART + MEMSZ]
               * topics are as provided by the opcode
            c. The ordered list of logs in the transaction are expressed as [log0, log1, ..., logN].
            """
            depth = int(op[3:])
            mstart, msz = stk.pop(), stk.pop()
            topics = [stk.pop() for x in range(depth)]
            compustate.gas -= msz * opcodes.GLOGBYTE
            if not mem_extend(mem, compustate, op, mstart, msz):
                return vm_exception('OOG EXTENDING MEMORY')
            data = b''.join(map(ascii_chr, mem[mstart: mstart + msz]))
            ext.log(msg.to, topics, data)
            log_log.trace('LOG', to=msg.to, topics=topics, data=list(map(utils.safe_ord, data)))
            # print('LOG', msg.to, topics, list(map(ord, data)))
        elif op == 'CREATE':
            value, mstart, msz = stk.pop(), stk.pop(), stk.pop()
            if not mem_extend(mem, compustate, op, mstart, msz):
                return vm_exception('OOG EXTENDING MEMORY')
            if ext.get_balance(msg.to) >= value and msg.depth < 1024:
                cd = CallData(mem, mstart, msz)
                ingas = compustate.gas
                # EIP150(1b) CREATE only provides all but one 64th of the
                # parent gas to the child call
                if ext.post_anti_dos_hardfork:
                    ingas = max_call_gas(ingas)
                create_msg = Message(msg.to, b'', value, ingas, cd, msg.depth + 1)
                o, gas, addr = ext.create(create_msg)
                if o:
                    stk.append(utils.coerce_to_int(addr))
                    compustate.gas -= (ingas - gas)
                else:
                    stk.append(0)
                    compustate.gas -= ingas
            else:
                stk.append(0)
        elif op == 'CALL':
            gas, to, value, meminstart, meminsz, memoutstart, memoutsz = \
                stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop()
            if not mem_extend(mem, compustate, op, meminstart, meminsz) or \
                    not mem_extend(mem, compustate, op, memoutstart, memoutsz):
                return vm_exception('OOG EXTENDING MEMORY')
            # normalise the target to a 20-byte address
            to = utils.encode_int(to)
            to = ((b'\x00' * (32 - len(to))) + to)[12:]
            extra_gas = (not ext.account_exists(to)) * opcodes.GCALLNEWACCOUNT + \
                (value > 0) * opcodes.GCALLVALUETRANSFER + \
                ext.post_anti_dos_hardfork * opcodes.CALL_SUPPLEMENTAL_GAS
            # ^ EIP150 Increase the gas cost of CALL to 700
            if ext.post_anti_dos_hardfork:
                # EIP150(1b) if a call asks for more gas than all but one 64th of
                # the maximum allowed amount, call with all but one 64th of the
                # maximum allowed amount of gas
                if compustate.gas < extra_gas:
                    return vm_exception('OUT OF GAS', needed=extra_gas)
                gas = min(gas, max_call_gas(compustate.gas - extra_gas))
            else:
                if compustate.gas < gas + extra_gas:
                    return vm_exception('OUT OF GAS', needed=gas + extra_gas)
            submsg_gas = gas + opcodes.GSTIPEND * (value > 0)
            if ext.get_balance(msg.to) >= value and msg.depth < 1024:
                compustate.gas -= (gas + extra_gas)
                cd = CallData(mem, meminstart, meminsz)
                call_msg = Message(msg.to, to, value, submsg_gas, cd,
                                   msg.depth + 1, code_address=to)
                result, gas, data = ext.msg(call_msg)
                if result == 0:
                    stk.append(0)
                else:
                    stk.append(1)
                    compustate.gas += gas
                    for i in range(min(len(data), memoutsz)):
                        mem[memoutstart + i] = data[i]
            else:
                compustate.gas -= (gas + extra_gas - submsg_gas)
                stk.append(0)
        elif op == 'CALLCODE' or op == 'DELEGATECALL':
            if op == 'CALLCODE':
                gas, to, value, meminstart, meminsz, memoutstart, memoutsz = \
                    stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop()
            else:
                # DELEGATECALL takes no value argument
                gas, to, meminstart, meminsz, memoutstart, memoutsz = \
                    stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop()
                value = 0
            if not mem_extend(mem, compustate, op, meminstart, meminsz) or \
                    not mem_extend(mem, compustate, op, memoutstart, memoutsz):
                return vm_exception('OOG EXTENDING MEMORY')
            extra_gas = (value > 0) * opcodes.GCALLVALUETRANSFER + \
                ext.post_anti_dos_hardfork * opcodes.CALL_SUPPLEMENTAL_GAS
            # ^ EIP150 Increase the gas cost of CALLCODE, DELEGATECALL to 700
            if ext.post_anti_dos_hardfork:
                # EIP150(1b) if a call asks for more gas than all but one 64th of
                # the maximum allowed amount, call with all but one 64th of the
                # maximum allowed amount of gas
                if compustate.gas < extra_gas:
                    return vm_exception('OUT OF GAS', needed=extra_gas)
                gas = min(gas, max_call_gas(compustate.gas - extra_gas))
            else:
                if compustate.gas < gas + extra_gas:
                    return vm_exception('OUT OF GAS', needed=gas + extra_gas)
            submsg_gas = gas + opcodes.GSTIPEND * (value > 0)
            if ext.get_balance(msg.to) >= value and msg.depth < 1024:
                compustate.gas -= (gas + extra_gas)
                to = utils.encode_int(to)
                to = ((b'\x00' * (32 - len(to))) + to)[12:]
                cd = CallData(mem, meminstart, meminsz)
                if ext.post_homestead_hardfork and op == 'DELEGATECALL':
                    call_msg = Message(msg.sender, msg.to, msg.value, submsg_gas, cd,
                                       msg.depth + 1, code_address=to, transfers_value=False)
                elif op == 'DELEGATECALL':
                    return vm_exception('OPCODE INACTIVE')
                else:
                    call_msg = Message(msg.to, msg.to, value, submsg_gas, cd,
                                       msg.depth + 1, code_address=to)
                result, gas, data = ext.msg(call_msg)
                if result == 0:
                    stk.append(0)
                else:
                    stk.append(1)
                    compustate.gas += gas
                    for i in range(min(len(data), memoutsz)):
                        mem[memoutstart + i] = data[i]
            else:
                compustate.gas -= (gas + extra_gas - submsg_gas)
                stk.append(0)
        elif op == 'RETURN':
            s0, s1 = stk.pop(), stk.pop()
            if not mem_extend(mem, compustate, op, s0, s1):
                return vm_exception('OOG EXTENDING MEMORY')
            return peaceful_exit('RETURN', compustate.gas, mem[s0: s0 + s1])
        elif op == 'SUICIDE':
            to = utils.encode_int(stk.pop())
            to = ((b'\x00' * (32 - len(to))) + to)[12:]
            if ext.post_anti_dos_hardfork:
                # EIP150 Increase the gas cost of SUICIDE to 5000
                extra_gas = opcodes.SUICIDE_SUPPLEMENTAL_GAS + \
                    (not ext.account_exists(to)) * opcodes.GCALLNEWACCOUNT
                # ^ EIP150(1c) If SUICIDE hits a newly created account, it
                # triggers an additional gas cost of 25000 (similar to CALLs)
                if not eat_gas(compustate, extra_gas):
                    return vm_exception("OUT OF GAS")
            xfer = ext.get_balance(msg.to)
            ext.set_balance(to, ext.get_balance(to) + xfer)
            ext.set_balance(msg.to, 0)
            ext.add_suicide(msg.to)
            # print('suiciding %s %s %d' % (msg.to, to, xfer))
            return 1, compustate.gas, []

        # this is slow!
        # for a in stk:
        #     assert is_numeric(a), (op, stk)
        #     assert a >= 0 and a < 2**256, (a, op, stk)
class VmExtBase():
    """Null/stub external interface for the VM.

    Every callback returns a zero/empty value and all block/transaction
    attributes are zero, so the VM can be exercised without a real chain
    backend (used for testing).
    """
    def __init__(self):
        self.get_code = lambda addr: b''
        self.get_balance = lambda addr: 0
        self.set_balance = lambda addr, balance: 0
        self.set_storage_data = lambda addr, key, value: 0
        self.get_storage_data = lambda addr, key: 0
        self.log_storage = lambda addr: 0
        self.add_suicide = lambda addr: 0
        self.add_refund = lambda x: 0
        self.block_prevhash = 0
        self.block_coinbase = 0
        self.block_timestamp = 0
        self.block_number = 0
        self.block_difficulty = 0
        self.block_gas_limit = 0
        self.log = lambda addr, topics, data: 0
        self.tx_origin = b'0' * 40
        self.tx_gasprice = 0
        # Fix: the tuples must be parenthesised. Without parentheses the
        # comma binds looser than the lambda body, so the original code
        # assigned the 3-tuple (lambda msg: 0, 0, 0) instead of a callable
        # returning the (result, gas, data) triple (0, 0, 0).
        self.create = lambda msg: (0, 0, 0)
        self.call = lambda msg: (0, 0, 0)
        self.sendmsg = lambda msg: (0, 0, 0)
| {
"content_hash": "d01fc67247d0d08e06c6ea5bea95d3d0",
"timestamp": "",
"source": "github",
"line_count": 681,
"max_line_length": 100,
"avg_line_length": 42.86637298091043,
"alnum_prop": 0.49136749794464235,
"repo_name": "shahankhatch/pyethereum",
"id": "a1b3edad572ea67cd4dcc2f869a3ea1eefcc3977",
"size": "29235",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ethereum/vm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2045"
},
{
"name": "Python",
"bytes": "517220"
},
{
"name": "Shell",
"bytes": "411"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Package version, also passed to setup() below.
VERSION = '0.1.1'
def read(file_name):
    """Return the entire contents of *file_name* as text."""
    with open(file_name, 'r') as source:
        content = source.read()
    return content
# Register the distribution with setuptools; the README is embedded as
# the long description shown on PyPI.
setup(
    name='django-terminator',
    description='''One time method executor for Django models''',
    long_description=read('README.rst'),
    version=str(VERSION),
    author='Krzysztof Jurewicz',
    author_email='krzysztof.jurewicz@gmail.com',
    url='http://github.com/KrzysiekJ/django-terminator',
    packages=['terminator'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
)
| {
"content_hash": "4f7154a01e9f082be31a911868044573",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 65,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.6179775280898876,
"repo_name": "KrzysiekJ/django-terminator",
"id": "dca07c017cdfaf7907fd4b1df9f90aba90aa1f4c",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9673"
}
],
"symlink_target": ""
} |
import logging
from datetime import date
from dateutil.relativedelta import relativedelta
from dateutil.rrule import (
MONTHLY,
rrule,
)
from decimal import Decimal
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import (
models,
transaction,
)
from django.utils import timezone
import reversion
import stripe
from base.model_utils import TimeStampedModel
from base.singleton import SingletonModel
from mail.models import Notify
from mail.service import (
queue_mail_message,
queue_mail_template,
)
# Currency code passed to Stripe for every charge.
CURRENCY = 'GBP'
# Module-level logger for checkout events and errors.
logger = logging.getLogger(__name__)
def _card_error(e):
return (
"CardError: param '{}' code '{}' http body '{}' "
"http status '{}'".format(
e.param,
e.code,
e.http_body,
e.http_status,
)
)
def _stripe_error(e):
return ("http body: '{}' http status: '{}'".format(
e.http_body,
e.http_status,
))
def as_pennies(total):
    """Convert a decimal currency amount into integer pennies."""
    pennies = total * Decimal('100')
    return int(pennies)
def default_checkout_state():
    """Return the pk of the 'pending' state - default for new checkouts."""
    pending = CheckoutState.objects.get(slug=CheckoutState.PENDING)
    return pending.pk
def expiry_date_as_str(item):
    """Return ``item['expiry_date']`` as 'YYYYMMDD', or '' if unset."""
    expiry = item.get('expiry_date', None)
    if not expiry:
        return ''
    return expiry.strftime('%Y%m%d')
class CheckoutError(Exception):
    """Raised for any checkout/Stripe processing failure."""
    def __init__(self, value):
        super().__init__()
        self.value = value
    def __str__(self):
        message = '{}, {}'.format(self.__class__.__name__, self.value)
        return repr(message)
class CheckoutStateManager(models.Manager):
    """Convenience accessors for the well-known checkout states."""
    @property
    def fail(self):
        """Return the 'fail' state instance."""
        return self.model.objects.get(slug=self.model.FAIL)
    @property
    def pending(self):
        """Return the 'pending' state instance."""
        return self.model.objects.get(slug=self.model.PENDING)
    @property
    def request(self):
        """The 'request' state is used for payment plans only.
        It allows the system to set the state to ``request`` before charging
        the account.
        """
        return self.model.objects.get(slug=self.model.REQUEST)
    @property
    def success(self):
        """Return the 'success' state instance."""
        return self.model.objects.get(slug=self.model.SUCCESS)
class CheckoutState(TimeStampedModel):
    """Lookup model recording the state of a checkout transaction."""
    FAIL = 'fail'
    PENDING = 'pending'
    # The 'request' state is used for payment plans only.
    REQUEST = 'request'
    SUCCESS = 'success'
    name = models.CharField(max_length=100)
    slug = models.SlugField(unique=True)
    objects = CheckoutStateManager()
    class Meta:
        ordering = ('name',)
        verbose_name = 'Checkout state'
        verbose_name_plural = 'Checkout states'
    def __str__(self):
        return '{}'.format(self.name)
    @property
    def is_success(self):
        """True if this is the 'success' state."""
        return self.slug == self.SUCCESS
    @property
    def is_pending(self):
        """True if this is the 'pending' state."""
        return self.slug == self.PENDING
reversion.register(CheckoutState)
class CheckoutActionManager(models.Manager):
    """Convenience accessors for the well-known checkout actions."""
    @property
    def card_refresh(self):
        return self.model.objects.get(slug=self.model.CARD_REFRESH)
    @property
    def charge(self):
        return self.model.objects.get(slug=self.model.CHARGE)
    @property
    def invoice(self):
        return self.model.objects.get(slug=self.model.INVOICE)
    @property
    def manual(self):
        return self.model.objects.get(slug=self.model.MANUAL)
    @property
    def payment(self):
        return self.model.objects.get(slug=self.model.PAYMENT)
    @property
    def payment_plan(self):
        return self.model.objects.get(slug=self.model.PAYMENT_PLAN)
class CheckoutAction(TimeStampedModel):
    """Lookup model describing the type of a checkout transaction."""
    CARD_REFRESH = 'card_refresh'
    CHARGE = 'charge'
    INVOICE = 'invoice'
    MANUAL = 'manual'
    PAYMENT = 'payment'
    PAYMENT_PLAN = 'payment_plan'
    name = models.CharField(max_length=100)
    slug = models.SlugField(unique=True)
    # does this action take money from the customer's card?
    # ('Checkout._charge' only calls Stripe when this flag is set)
    payment = models.BooleanField()
    objects = CheckoutActionManager()
    class Meta:
        ordering = ('name',)
        verbose_name = 'Checkout action'
        verbose_name_plural = 'Checkout action'
    def __str__(self):
        return '{}'.format(self.name)
    @property
    def invoice(self):
        """True if this is the 'invoice' action."""
        return self.slug == self.INVOICE
reversion.register(CheckoutAction)
class CustomerManager(models.Manager):
    """Create/update Stripe customers and mirror them in local records."""
    def _create_customer(self, name, email, customer_id):
        # persist a local record linking our email to the Stripe id
        obj = self.model(name=name, email=email, customer_id=customer_id)
        obj.save()
        return obj
    def _get_customer(self, email):
        # 'email' is unique on the model, so this returns one row or raises
        return self.model.objects.get(email=email)
    def _stripe_create(self, email, description, token):
        """Use the Stripe API to create a customer."""
        try:
            stripe.api_key = settings.STRIPE_SECRET_KEY
            customer = stripe.Customer.create(
                email=email,
                description=description,
                card=token,
            )
            return customer.id
        except (stripe.InvalidRequestError, stripe.StripeError) as e:
            raise CheckoutError(
                "Error creating Stripe customer '{}': {}".format(
                    email, _stripe_error(e)
                )) from e
    def _stripe_get_card_expiry(self, customer_id):
        """Return (year, month) of the default card, or (0, 0) if not found."""
        result = (0, 0)
        stripe.api_key = settings.STRIPE_SECRET_KEY
        customer = stripe.Customer.retrieve(customer_id)
        default_card = customer['default_card']
        # find the details of the default card
        for card in customer['cards']['data']:
            if card['id'] == default_card:
                # find the expiry date of the default card
                result = (int(card['exp_year']), int(card['exp_month']))
                break
        return result
    def _stripe_update(self, customer_id, description, token):
        """Use the Stripe API to update a customer."""
        try:
            stripe.api_key = settings.STRIPE_SECRET_KEY
            stripe_customer = stripe.Customer.retrieve(customer_id)
            stripe_customer.description = description
            stripe_customer.card = token
            stripe_customer.save()
        except stripe.StripeError as e:
            raise CheckoutError(
                "Error updating Stripe customer '{}': {}".format(
                    customer_id, _stripe_error(e)
                )) from e
    def init_customer(self, content_object, token):
        """Initialise Stripe customer using email, description and token.
        1. Lookup existing customer record in the database.
        - Retrieve customer from Stripe and update description and token.
        2. If the customer does not exist:
        - Create Stripe customer with email, description and token.
        - Create a customer record in the database.
        Return the customer database record and clear the card ``refresh``
        flag.
        """
        name = content_object.checkout_name
        email = content_object.checkout_email
        try:
            obj = self._get_customer(email)
            obj.name = name
            obj.refresh = False
            obj.save()
            self._stripe_update(obj.customer_id, name, token)
        except self.model.DoesNotExist:
            customer_id = self._stripe_create(email, name, token)
            obj = self._create_customer(name, email, customer_id)
        return obj
    @property
    def refresh(self):
        # customers flagged as needing to re-enter their card details
        return self.model.objects.filter(refresh=True)
    def refresh_credit_card(self, email):
        """Does the customer with this email need to refresh their card?"""
        result = False
        try:
            customer = self._get_customer(email)
            result = customer.refresh
        except self.model.DoesNotExist:
            pass
        return result
    def update_card_expiry(self, email):
        """Find the customer, get the expiry date from Stripe and update."""
        try:
            obj = self._get_customer(email)
            year, month = self._stripe_get_card_expiry(obj.customer_id)
            if year and month:
                # last day of the month
                obj.expiry_date = date(year, month, 1) + relativedelta(
                    months=+1, day=1, days=-1
                )
                # is the card expiring soon?
                is_expiring = obj.is_expiring
                if obj.refresh == is_expiring:
                    pass
                else:
                    with transaction.atomic():
                        obj.refresh = is_expiring
                        # save the details
                        obj.save()
                        # email the customer
                        queue_mail_template(
                            obj,
                            self.model.MAIL_TEMPLATE_CARD_EXPIRY,
                            {obj.email: dict(name=obj.name)}
                        )
        except Customer.DoesNotExist:
            pass
class Customer(TimeStampedModel):
    """Stripe Customer.
    Link the Stripe customer to an email address (and name).
    Note: It is expected that multiple users in our databases could have the
    same email address. If they have different names, then this table looks
    very confusing. Try checking the 'content_object' of the 'Checkout' model
    if you need to diagnose an issue.
    """
    MAIL_TEMPLATE_CARD_EXPIRY = 'customer_card_expiry'
    name = models.TextField()
    email = models.EmailField(unique=True)
    # Stripe's identifier for this customer
    customer_id = models.TextField()
    # set (by 'CustomerManager.update_card_expiry') to the last day of the
    # month in which the default card expires
    expiry_date = models.DateField(blank=True, null=True)
    refresh = models.BooleanField(
        default=False,
        help_text='Should the customer refresh their card details?'
    )
    objects = CustomerManager()
    class Meta:
        ordering = ('pk',)
        verbose_name = 'Customer'
        verbose_name_plural = 'Customers'
    def __str__(self):
        return '{} {}'.format(self.email, self.customer_id)
    @property
    def is_expiring(self):
        """Is the card expiring within the next month?
        If the ``expiry_date`` is ``None``, then it has *not* expired.
        The expiry date is set to the last day of the month e.g. for September
        2015, the ``expiry_date`` will be 30/09/2015.
        """
        result = False
        one_month = date.today() + relativedelta(months=+1)
        if self.expiry_date and self.expiry_date <= one_month:
            return True
        return result
        #return bool(self.expiry_date and self.expiry_date < date.today())
reversion.register(Customer)
class CheckoutManager(models.Manager):
    """Create and drive checkout transactions."""
    def audit(self):
        # every checkout record, newest first
        return self.model.objects.all().order_by('-pk')
    def create_checkout(self, action, content_object, user):
        """Create a checkout payment request."""
        if action == CheckoutAction.objects.card_refresh:
            # a card refresh takes no money
            total = None
        else:
            total = content_object.checkout_total
        obj = self.model(
            checkout_date=timezone.now(),
            action=action,
            content_object=content_object,
            description=', '.join(content_object.checkout_description),
            total=total,
        )
        # an anonymous user can create a checkout
        if user.is_authenticated():
            obj.user = user
        obj.save()
        return obj
    def charge(self, content_object, current_user):
        """Collect some money from a customer.
        You must be a member of staff to use this method. For payment plans,
        a background process will charge the card. In this case, the user will
        be anonymous.
        We should only attempt to collect money if the customer has already
        entered their card details.
        """
        content_object.refresh_from_db()
        if not content_object.checkout_can_charge:
            raise CheckoutError('Cannot charge the card.')
        try:
            customer = Customer.objects.get(
                email=content_object.checkout_email
            )
        except Customer.DoesNotExist as e:
            raise CheckoutError(
                "Customer '{}' has not registered a card".format(
                    content_object.checkout_email
                )
            ) from e
        action = CheckoutAction.objects.charge
        checkout = self.create_checkout(
            action,
            content_object,
            current_user
        )
        checkout.customer = customer
        checkout.save()
        try:
            checkout.charge(current_user)
            # record the outcome (and let the content object react) atomically
            with transaction.atomic():
                checkout.success()
        except CheckoutError:
            with transaction.atomic():
                checkout.fail()
        # email the admin team with the outcome
        checkout.notify()
    def manual(self, content_object, current_user):
        """Mark a transaction as paid (manual).
        You must be a member of staff to use this method.
        """
        content_object.refresh_from_db()
        if not current_user.is_staff:
            raise CheckoutError(
                'Only a member of staff can mark this transaction as paid.'
            )
        valid_state = (
            CheckoutState.FAIL,
            CheckoutState.PENDING,
            CheckoutState.REQUEST,
        )
        if not content_object.state.slug in valid_state:
            raise CheckoutError('Cannot mark this transaction as paid.')
        action = CheckoutAction.objects.manual
        checkout = self.create_checkout(
            action,
            content_object,
            current_user
        )
        checkout.save()
        try:
            with transaction.atomic():
                checkout.success()
        except CheckoutError:
            with transaction.atomic():
                checkout.fail()
    def success(self):
        # successful checkouts, newest first
        return self.audit().filter(state=CheckoutState.objects.success)
class Checkout(TimeStampedModel):
    """Checkout.
    Create a 'Checkout' instance when you want to interact with Stripe e.g.
    take a payment, get card details to set-up a payment plan or refresh the
    details of an expired card.
    """
    checkout_date = models.DateTimeField()
    action = models.ForeignKey(CheckoutAction)
    customer = models.ForeignKey(
        Customer,
        blank=True,
        null=True,
    )
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='+',
        blank=True,
        null=True,
        help_text=(
            'User who created the checkout request '
            '(or blank if the user is not logged in)'
        )
    )
    state = models.ForeignKey(
        CheckoutState,
        default=default_checkout_state
        #blank=True, null=True
    )
    description = models.TextField()
    total = models.DecimalField(
        max_digits=8, decimal_places=2, blank=True, null=True
    )
    # link to the object in the system which requested the checkout
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()
    objects = CheckoutManager()
    class Meta:
        ordering = ('pk',)
        verbose_name = 'Checkout'
        verbose_name_plural = 'Checkouts'
    def __str__(self):
        return '{}'.format(self.content_object.checkout_email)
    def _charge(self):
        """Charge the card."""
        # only actions flagged as 'payment' actually take money
        if self.action.payment:
            self._charge_stripe()
    def _charge_stripe(self):
        """Create the charge on stripe's servers."""
        stripe.api_key = settings.STRIPE_SECRET_KEY
        try:
            stripe.Charge.create(
                amount=as_pennies(self.total),
                currency=CURRENCY,
                customer=self.customer.customer_id,
                description=self.description,
            )
        except stripe.CardError as e:
            raise CheckoutError(
                "Card error: '{}' checkout '{}', object '{}': {}".format(
                    e.code, self.pk, self.content_object.pk, _card_error(e),
                )
            ) from e
        except stripe.StripeError as e:
            raise CheckoutError(
                "Card error: checkout '{}', object '{}': {}".format(
                    self.pk, self.content_object.pk, _stripe_error(e),
                )
            ) from e
    def _success_or_fail(self, state):
        # record the final state of the transaction
        self.state = state
        self.save()
    def charge(self, current_user):
        """Charge the user's card.
        Must be a member of staff or anonymous (used when running as a
        background task) to use this method.
        To take payments for the current user, use the ``charge_user`` method.
        """
        # Fix: call 'is_anonymous()'. This code base uses the old Django API
        # where it is a method ('is_authenticated()' is called elsewhere in
        # this module); referencing the bound method without calling it was
        # always truthy, letting any user through this guard.
        if current_user.is_staff or current_user.is_anonymous():
            self._charge()
        else:
            raise CheckoutError(
                "Cannot process - payments can only "
                "be taken by a member of staff. "
                "Current: '{}', Customer: '{}'".format(
                    current_user.email, self.customer.email
                ))
    def charge_user(self, current_user):
        """Charge the card of the current user.
        Use this method when the logged in user is performing the transaction.
        To take money from another user's card, you must be a member of staff
        and use the ``charge`` method.
        """
        anonymous = not current_user.is_authenticated()
        if anonymous or self.customer.email == current_user.email:
            self._charge()
        else:
            raise CheckoutError(
                "Cannot process - payments can only be taken "
                "for an anonymous user or the current user. "
                "Current: '{}', Customer: '{}'".format(
                    current_user.email, self.customer.email
                ))
    @property
    def content_object_url(self):
        return self.content_object.get_absolute_url()
    def fail(self):
        """Checkout failed - so update and notify admin."""
        self._success_or_fail(CheckoutState.objects.fail)
        return self.content_object.checkout_fail()
    @property
    def failed(self):
        """Did the checkout request fail?"""
        return self.state == CheckoutState.objects.fail
    @property
    def invoice_data(self):
        """Address/contact lines (empty if there is no additional record)."""
        try:
            # Fix: the reverse one-to-one accessor for 'CheckoutAdditional'
            # is 'checkoutadditional'; the original read 'checkoutinvoice'
            # (left over from an earlier model name) which raised an
            # uncaught AttributeError - note the except clause below already
            # catches 'CheckoutAdditional.DoesNotExist'.
            data = self.checkoutadditional
            return filter(None, (
                data.contact_name,
                data.company_name,
                data.address_1,
                data.address_2,
                data.address_3,
                data.town,
                data.county,
                data.postcode,
                data.country,
                data.email,
                data.phone,
            ))
        except CheckoutAdditional.DoesNotExist:
            return []
    @property
    def is_invoice(self):
        return self.action == CheckoutAction.objects.invoice
    @property
    def is_manual(self):
        return self.action == CheckoutAction.objects.manual
    @property
    def is_payment(self):
        return self.action == CheckoutAction.objects.payment
    @property
    def is_payment_plan(self):
        """Used in success templates."""
        return self.action == CheckoutAction.objects.payment_plan
    def notify(self, request=None):
        """Send notification of checkout status.
        Pass in a 'request' if you want the email to contain the URL of the
        checkout transaction.
        """
        email_addresses = [n.email for n in Notify.objects.all()]
        if email_addresses:
            caption = self.action.name
            state = self.state.name
            subject = '{} - {} from {}'.format(
                state.upper(),
                caption.capitalize(),
                self.content_object.checkout_name,
            )
            message = '{} - {} - {} from {}, {}:'.format(
                self.created.strftime('%d/%m/%Y %H:%M'),
                state.upper(),
                caption,
                self.content_object.checkout_name,
                self.content_object.checkout_email,
            )
            message = message + '\n\n{}\n\n{}'.format(
                self.description,
                request.build_absolute_uri(self.content_object_url) if request else '',
            )
            queue_mail_message(
                self,
                email_addresses,
                subject,
                message,
            )
        else:
            logger.error(
                "Cannot send email notification of checkout transaction. "
                "No email addresses set-up in 'enquiry.models.Notify'"
            )
    def success(self):
        """Checkout successful - so update and notify admin."""
        self._success_or_fail(CheckoutState.objects.success)
        return self.content_object.checkout_success(self)
reversion.register(Checkout)
class CheckoutAdditionalManager(models.Manager):
    """Create helper for 'CheckoutAdditional' records."""
    def create_checkout_additional(self, checkout, **kwargs):
        # '**kwargs' carries the address/contact fields from the form
        obj = self.model(checkout=checkout, **kwargs)
        obj.save()
        return obj
class CheckoutAdditional(TimeStampedModel):
    """If a user decides to pay by invoice, there are the details.
    Links with the 'CheckoutForm' in ``checkout/forms.py``. Probably easier to
    put validation in the form if required.
    """
    checkout = models.OneToOneField(Checkout)
    # company
    company_name = models.CharField(max_length=100, blank=True)
    address_1 = models.CharField('Address', max_length=100, blank=True)
    address_2 = models.CharField('', max_length=100, blank=True)
    address_3 = models.CharField('', max_length=100, blank=True)
    town = models.CharField(max_length=100, blank=True)
    county = models.CharField(max_length=100, blank=True)
    postcode = models.CharField(max_length=20, blank=True)
    country = models.CharField(max_length=100, blank=True)
    # contact
    contact_name = models.CharField(max_length=100, blank=True)
    email = models.EmailField(blank=False)
    phone = models.CharField(max_length=50, blank=True)
    # Fix: this line was a syntax error - 'models.DateField(}'.
    # Reconstructed as an optional field to match the other non-mandatory
    # contact fields - TODO confirm the intended constraints.
    date_of_birth = models.DateField(blank=True, null=True)
    objects = CheckoutAdditionalManager()
    class Meta:
        ordering = ('email',)
        verbose_name = 'Checkout Additional Information'
        verbose_name_plural = 'Checkout Additional Information'
    def __str__(self):
        return '{}'.format(self.email)
reversion.register(CheckoutAdditional)
class PaymentPlanManager(models.Manager):
    """Create and query payment plan definitions."""
    def create_payment_plan(self, slug, name, deposit, count, interval):
        """Create a plan: deposit %, instalment count and monthly interval."""
        obj = self.model(
            slug=slug,
            name=name,
            deposit=deposit,
            count=count,
            interval=interval,
        )
        obj.save()
        return obj
    def current(self):
        """List of payment plan headers excluding 'deleted'."""
        return self.model.objects.exclude(deleted=True)
class PaymentPlan(TimeStampedModel):
    """Definition of a payment plan."""
    name = models.TextField()
    slug = models.SlugField(unique=True)
    deposit = models.IntegerField(help_text='Initial deposit as a percentage')
    count = models.IntegerField(help_text='Number of instalments')
    interval = models.IntegerField(help_text='Instalment interval in months')
    deleted = models.BooleanField(default=False)
    objects = PaymentPlanManager()
    class Meta:
        ordering = ('slug',)
        verbose_name = 'Payment plan'
        verbose_name_plural = 'Payment plan'
    def __str__(self):
        return '{}'.format(self.slug)
    def clean(self):
        """Model-level validation: all three plan parameters are required."""
        if not self.count:
            raise ValidationError('Set at least one instalment.')
        if not self.deposit:
            raise ValidationError('Set an initial deposit.')
        if not self.interval:
            raise ValidationError('Set the number of months between instalments.')
    def save(self, *args, **kwargs):
        # a plan already attached to an 'ObjectPaymentPlan' must not change,
        # or existing instalment schedules would become inconsistent
        if self.can_update:
            super().save(*args, **kwargs)
        else:
            raise CheckoutError(
                'Payment plan in use. Cannot be updated.'
            )
    @property
    def can_update(self):
        """True while no 'ObjectPaymentPlan' references this plan."""
        count = ObjectPaymentPlan.objects.filter(payment_plan=self).count()
        if count:
            return False
        else:
            return True
    def deposit_amount(self, total):
        """Deposit due on *total* (a percentage), quantized to 2 d.p."""
        return (
            total * (self.deposit / Decimal('100'))
        ).quantize(Decimal('.01'))
    def instalments(self, deposit_due_date, total):
        """Return ``[(due_date, amount), ...]`` for the instalments.
        The deposit is excluded; the final instalment is adjusted so the
        amounts sum exactly to *total*.
        """
        # deposit
        deposit = self.deposit_amount(total)
        # list of dates
        first_interval = self.interval
        if deposit_due_date.day > 15:
            # late in the month, so push the first instalment back a month
            first_interval = first_interval + 1
        start_date = deposit_due_date + relativedelta(
            months=+first_interval,
            day=1,
        )
        instalment_dates = [d.date() for d in rrule(
            MONTHLY,
            count=self.count,
            dtstart=start_date,
            interval=self.interval,
        )]
        # instalments
        instalment = (
            (total - deposit) / self.count
        ).quantize(Decimal('.01'))
        # list of payment amounts
        values = []
        check = deposit
        for d in instalment_dates:
            value = instalment
            values.append(value)
            check = check + value
        # make the total match
        values[-1] = values[-1] + (total - check)
        return list(zip(instalment_dates, values))
    def example(self, deposit_due_date, total):
        """Deposit plus instalments - used to preview a plan."""
        result = [
            (deposit_due_date, self.deposit_amount(total)),
        ]
        return result + self.instalments(deposit_due_date, total)
reversion.register(PaymentPlan)
class ObjectPaymentPlanManager(models.Manager):
    """Manage payment plans attached to arbitrary content objects."""
    def charge_deposit(self, content_object, user):
        """Charge the deposit for the plan linked to *content_object*."""
        payment_plan = self.for_content_object(content_object)
        payment_plan.charge_deposit(user)
    def create_object_payment_plan(self, content_object, payment_plan, total):
        """Create a payment plan for an object with the initial deposit record.
        This method must be called from within a transaction.
        """
        obj = self.model(
            content_object=content_object,
            payment_plan=payment_plan,
            total=total,
        )
        obj.save()
        # count=1, deposit=True, due today
        ObjectPaymentPlanInstalment.objects.create_object_payment_plan_instalment(
            obj,
            1,
            True,
            payment_plan.deposit_amount(total),
            date.today()
        )
        return obj
    def for_content_object(self, obj):
        # generic FK lookup: (content_type, object_id) identifies the plan
        return self.model.objects.get(
            content_type=ContentType.objects.get_for_model(obj),
            object_id=obj.pk
        )
    @property
    def outstanding_payment_plans(self):
        """List of outstanding payment plans.
        Used to refresh card expiry dates.
        """
        values = ObjectPaymentPlanInstalment.objects.filter(
            state__slug__in=(
                CheckoutState.FAIL,
                CheckoutState.PENDING,
                CheckoutState.REQUEST,
            ),
        ).values_list(
            'object_payment_plan__pk',
            flat=True
        )
        # 'set' will remove duplicate 'values'
        return self.model.objects.filter(
            pk__in=(set(values)),
        ).exclude(
            deleted=True,
        )
    @property
    def report_card_expiry_dates(self):
        """Outstanding plans with their card expiry dates, sorted by expiry."""
        emails = []
        result = []
        payment_plans = self.outstanding_payment_plans
        for item in payment_plans:
            emails.append(item.content_object.checkout_email)
        # get the expiry date for all the customers (as a 'dict')
        customers = dict(Customer.objects.filter(
            email__in=emails
        ).values_list(
            'email', 'expiry_date'
        ))
        for item in payment_plans:
            expiry_date = customers.get(item.content_object.checkout_email)
            result.append(dict(
                expiry_date=expiry_date,
                object_payment_plan=item,
            ))
        return sorted(result, key=expiry_date_as_str)
    def refresh_card_expiry_dates(self):
        """Refresh the card expiry dates for outstanding payment plans."""
        for plan in self.outstanding_payment_plans:
            Customer.objects.update_card_expiry(
                plan.content_object.checkout_email
            )
class ObjectPaymentPlan(TimeStampedModel):
    """Payment plan for an object."""
    # link to the object in the system which requested the payment plan
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()
    # payment plan
    payment_plan = models.ForeignKey(PaymentPlan)
    total = models.DecimalField(max_digits=8, decimal_places=2)
    # is this object deleted?
    deleted = models.BooleanField(default=False)
    objects = ObjectPaymentPlanManager()
    class Meta:
        ordering = ('created',)
        unique_together = ('object_id', 'content_type')
        verbose_name = 'Object payment plan'
        verbose_name_plural = 'Object payment plans'
    def __str__(self):
        return '{} created {}'.format(self.payment_plan.name, self.created)
    @property
    def _check_create_instalments(self):
        """Check the current records to make sure we can create instalments."""
        instalments = self.objectpaymentplaninstalment_set.all()
        count = instalments.count()
        if not count:
            # a payment plan should always have a deposit record
            raise CheckoutError(
                "no deposit/instalment record set-up for "
                "payment plan: '{}'".format(self.pk)
            )
        if count == 1:
            # check the first payment is the deposit
            first_instalment = instalments.first()
            if first_instalment.deposit:
                deposit_due_date = first_instalment.due
            else:
                raise CheckoutError(
                    "no deposit record for "
                    "payment plan: '{}'".format(self.pk)
                )
        else:
            # cannot create instalments if already created!
            raise CheckoutError(
                "instalments already created for this "
                "payment plan: '{}'".format(self.pk)
            )
        return deposit_due_date
    def create_instalments(self):
        """Create the instalment records (run after the deposit is taken)."""
        deposit_due_date = self._check_create_instalments
        instalments = self.payment_plan.instalments(
            deposit_due_date,
            self.total
        )
        # counts continue after the deposit record (count 1)
        count = 1
        for due, amount in instalments:
            count = count + 1
            ObjectPaymentPlanInstalment.objects.create_object_payment_plan_instalment(
                self,
                count,
                False,
                amount,
                due,
            )
    def charge_deposit(self, user):
        """Charge the deposit instalment (the first record)."""
        # property access validates that only the deposit record exists
        self._check_create_instalments
        deposit = self.objectpaymentplaninstalment_set.first()
        Checkout.objects.charge(deposit, user)
    @property
    def payment_count(self):
        return self.objectpaymentplaninstalment_set.count()
    @property
    def payments(self):
        return self.objectpaymentplaninstalment_set.all().order_by('count')
reversion.register(ObjectPaymentPlan)
class ObjectPaymentPlanInstalmentManager(models.Manager):
    """Manage instalment records and the scheduled payment run."""
    #@property
    #def checkout_list(self):
    #    """All 'checkout' transactions for object payment plan instalments.
    #    Use to audit checkout failures so we can block access.
    #    """
    #    return Checkout.objects.filter(
    #        content_type=ContentType.objects.get_for_model(self.model),
    #    )
    def create_object_payment_plan_instalment(
            self, object_payment_plan, count, deposit, amount, due):
        obj = self.model(
            object_payment_plan=object_payment_plan,
            count=count,
            deposit=deposit,
            amount=amount,
            due=due,
        )
        obj.save()
        return obj
    @property
    def due(self):
        """Lock the records while we try and take the payment.
        TODO Do we need to check that a payment is not already linked to this
        record?
        """
        return self.model.objects.filter(
            due__lte=date.today(),
            state__slug=CheckoutState.PENDING,
        ).exclude(
            deposit=True,
        ).exclude(
            object_payment_plan__deleted=True,
        )
    def process_payments(self):
        """Process pending payments.
        We set the status to 'request' before asking for the money. This is
        because we can't put the payment request into a transaction. If we are
        not careful, we could have a situation where the payment succeeds and
        we don't manage to set the state to 'success'. In the code below, if
        the payment fails the record will be left in the 'request' state and
        so we won't ask for the money again.
        """
        pks = [o.pk for o in self.due]
        for pk in pks:
            with transaction.atomic():
                # make sure the payment is still pending
                instalment = self.model.objects.select_for_update(
                    nowait=True
                ).get(
                    pk=pk,
                    state__slug=CheckoutState.PENDING
                )
                # we are ready to request payment
                instalment.state = CheckoutState.objects.request
                instalment.save()
                # request payment
                Checkout.objects.charge(instalment, AnonymousUser())
class ObjectPaymentPlanInstalment(TimeStampedModel):
    """Payments due for an object.
    The deposit record gets created first. It has the ``deposit`` field set to
    ``True``.
    The instalment records are created after the deposit has been collected.
    Instalment records have the ``deposit`` field set to ``False``.
    """
    object_payment_plan = models.ForeignKey(ObjectPaymentPlan)
    count = models.IntegerField()
    state = models.ForeignKey(
        CheckoutState,
        default=default_checkout_state,
        #blank=True, null=True
    )
    deposit = models.BooleanField(help_text='Is this the initial payment')
    amount = models.DecimalField(max_digits=8, decimal_places=2)
    due = models.DateField()
    objects = ObjectPaymentPlanInstalmentManager()
    class Meta:
        unique_together = (
            ('object_payment_plan', 'due'),
            ('object_payment_plan', 'count'),
        )
        verbose_name = 'Payments for an object'
        verbose_name_plural = 'Payments for an object'
    def __str__(self):
        return '{} {} {}'.format(
            self.object_payment_plan.payment_plan.name,
            self.due,
            self.amount,
        )
    def get_absolute_url(self):
        """TODO Update this to display the payment plan."""
        return reverse('project.home')
    @property
    def checkout_actions(self):
        """No actions as payment is charged directly."""
        return []
    @property
    def checkout_can_charge(self):
        """Check we can take the payment."""
        result = False
        if self.deposit:
            # a deposit is charged while still 'pending'
            check = self.state.slug == CheckoutState.PENDING
        else:
            # instalments are moved to 'request' (by 'process_payments')
            # before they are charged
            check = self.state.slug == CheckoutState.REQUEST
        if check:
            result = self.due <= date.today()
        return result
    @property
    def checkout_description(self):
        """Plan name plus 'Deposit' or 'Instalment x of y'."""
        result = [
            '{}'.format(
                self.object_payment_plan.payment_plan.name,
            ),
        ]
        if self.deposit:
            result.append('Deposit')
        else:
            result.append('Instalment {} of {}'.format(
                self.count,
                self.object_payment_plan.payment_count,
            ))
        return result
    @property
    def checkout_email(self):
        return self.object_payment_plan.content_object.checkout_email
    def checkout_fail(self):
        """Update the object to record the payment failure.
        Called from within a transaction so you can update the model.
        """
        self.state = CheckoutState.objects.fail
        self.save()
        self.object_payment_plan.content_object.checkout_fail(due=self.due)
    def checkout_fail_url(self, checkout_pk):
        """No UI, so no URL."""
        return None
    @property
    def checkout_name(self):
        return self.object_payment_plan.content_object.checkout_name
    def checkout_success(self, checkout):
        """Update the object to record the payment success.
        Called from within a transaction and you can update the model.
        """
        self.state = checkout.state
        self.save()
        if self.deposit:
            # deposit paid - the remaining instalments can now be created
            self.object_payment_plan.create_instalments()
        self.object_payment_plan.content_object.checkout_success(checkout)
    def checkout_success_url(self, checkout_pk):
        """No UI, so no URL."""
        return None
    @property
    def checkout_total(self):
        return self.amount
reversion.register(ObjectPaymentPlanInstalment)
class CheckoutSettingsManager(models.Manager):
    """Access to the singleton checkout settings record."""
    @property
    def settings(self):
        """Return the settings record (raise if not yet created in admin)."""
        try:
            return self.model.objects.get()
        except self.model.DoesNotExist:
            raise CheckoutError(
                "Checkout settings have not been set-up in admin"
            )
class CheckoutSettings(SingletonModel):
    """Singleton holding site-wide checkout configuration."""
    default_payment_plan = models.ForeignKey(
        PaymentPlan,
    )
    objects = CheckoutSettingsManager()
    class Meta:
        verbose_name = 'Checkout Settings'
    def __str__(self):
        return "Default Payment Plan: {}".format(
            self.default_payment_plan.name
        )
reversion.register(CheckoutSettings)
#class ObjectPaymentPlanInstalmentCheckoutAudit(TimeStampedModel):
# """Keep an audit of checkout status."""
#
# object_payment_plan_instalment = models.ForeignKey(
# ObjectPaymentPlanInstalment
# )
# state = models.ForeignKey(
# CheckoutState,
# default=default_checkout_state,
# #blank=True, null=True
# )
#
# ChargeAudit.objects.create_charge_audit(
# content_object,
# current_user,
# checkout,
# )
| {
"content_hash": "0f0c6228f21fdef52d7a27f156f2f3bd",
"timestamp": "",
"source": "github",
"line_count": 1245,
"max_line_length": 87,
"avg_line_length": 30.840160642570282,
"alnum_prop": 0.5877435149494739,
"repo_name": "pkimber/checkout",
"id": "916fcf31548848689ef72320f0b8fe0800dfddf7",
"size": "38422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checkout/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "30373"
},
{
"name": "Python",
"bytes": "157568"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
} |
"A collection of augmented standard classes"
from hawkweed.classes.list_cls import List
from hawkweed.classes.dict_cls import Dict
from hawkweed.classes.iterable import Iterable
from hawkweed.classes.set_cls import Set
from hawkweed.classes.repr import Repr
from hawkweed.classes.collection import Collection
| {
"content_hash": "46e6c9418423f854c3741f4f6c596e1f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 50,
"avg_line_length": 44.142857142857146,
"alnum_prop": 0.8478964401294499,
"repo_name": "hellerve/hawkweed",
"id": "af306f277f016fe1151439dd4de0059c3249cd8e",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hawkweed/classes/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "339784"
},
{
"name": "Shell",
"bytes": "144"
}
],
"symlink_target": ""
} |
"""Package contenant la commande 'route liste'."""
from primaires.format.tableau import Tableau, DROITE
from primaires.interpreteur.masque.parametre import Parametre
class PrmListe(Parametre):
    """Command 'route liste' - lists existing routes."""

    def __init__(self):
        """Parameter constructor."""
        Parametre.__init__(self, "liste", "list")
        self.aide_courte = "affiche la liste des routes"
        self.aide_longue = \
            "Cette commande permet d'afficher la liste des " \
            "routes. Dans ce tableau se trouve l'origine de la route, " \
            "sa destination, sa taille (c'est-à-dire le nombre de " \
            "salles intermédiaires, en comptant la salle finale de " \
            "la liste). Les routes en construction n'ont pas de " \
            "destination définie."

    def interpreter(self, personnage, dic_masques):
        """Command interpretation method."""
        # Sort routes by identifier for a stable listing.
        routes = list(importeur.route.routes.values())
        routes.sort(key=lambda t: t.ident)
        if not routes:
            personnage << "|att|Aucune route n'a été créée.|ff|"
            return
        # Build the table: ID, origin, destination and size (room count).
        tableau = Tableau("Routes existantes")
        tableau.ajouter_colonne("ID", DROITE)
        tableau.ajouter_colonne("Origine")
        tableau.ajouter_colonne("Destination")
        tableau.ajouter_colonne("Taille", DROITE)
        for i, route in enumerate(routes):
            # Routes still under construction may lack origin/destination.
            origine = route.origine
            origine = origine.ident if origine else "|att|inconnue|ff|"
            destination = route.destination
            destination = destination.ident if destination else \
                "|att|inconnue|ff|"
            tableau.ajouter_ligne(i + 1, origine, destination,
                    str(len(route.salles)))
        personnage << tableau.afficher()
| {
"content_hash": "2aebfd3b795c06189314915127e330e0",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 73,
"avg_line_length": 40.15555555555556,
"alnum_prop": 0.6104039845047039,
"repo_name": "stormi/tsunami",
"id": "3d27163ab1f60dbd2a00b9b6d35277491b895612",
"size": "3379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/route/commandes/route/liste.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path
# fixes the ModuleNotFoundError when executing statistics.py in the console after code changes (e.g. git pull)
# somehow pip install does not keep track of packages
import mialab.utilities.statistic_utilities as statistic_utilities
if __name__ == "__main__":
    # Output overall statistics of a results.csv.
    #
    # Usage: statistics.py [path/to/results.csv]
    # Without an argument, the newest run directory under ./mia-result
    # (next to this script) is used.
    if len(sys.argv) > 1:
        result_file = sys.argv[1]
    else:
        script_dir = os.path.dirname(sys.argv[0])
        path = os.path.normpath(os.path.join(script_dir, './mia-result'))
        # os.listdir returns entries in arbitrary order; sort so the
        # lexicographically last (i.e. most recent timestamped) run
        # directory is picked deterministically.
        dirs = sorted(f for f in os.listdir(path)
                      if os.path.isdir(os.path.join(path, f)))
        result_file = os.path.join(path, dirs[-1], 'results.csv')

    print('Statistics for ', result_file)
    print(statistic_utilities.gather_statistics(result_file))
| {
"content_hash": "1856edcf7f25fe935e0a0c34692b7df6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 119,
"avg_line_length": 41.27272727272727,
"alnum_prop": 0.6806167400881057,
"repo_name": "mrunibe/MIALab",
"id": "bef3341c6b047522b06ad5233c0ea8e224756e1c",
"size": "908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/statistics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "4666"
},
{
"name": "Python",
"bytes": "243162"
},
{
"name": "Shell",
"bytes": "510"
},
{
"name": "TeX",
"bytes": "352153"
}
],
"symlink_target": ""
} |
'''
Global variables

Module holding application-wide shared singletons; import them from here
rather than creating new instances.
'''
from flask_sqlalchemy import SQLAlchemy

# Single SQLAlchemy instance shared by the whole application.
db = SQLAlchemy()
| {
"content_hash": "2a7292b96794d1a5fc490b242ac35839",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 12.714285714285714,
"alnum_prop": 0.6966292134831461,
"repo_name": "TheWaWaR/py-marked-diff",
"id": "40264ce270caff7cddd36e5e116e56dfd9d2d9b9",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/gvars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36753"
}
],
"symlink_target": ""
} |
"""
User view
"""
from ..utils import err
from ..models import db, User, Library, Permissions
from ..client import client
from base_view import BaseView
from flask import current_app
from flask.ext.discoverer import advertise
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from http_errors import MISSING_USERNAME_ERROR
class HarbourView(BaseView):
    """
    End point to import libraries from external systems.

    Subclasses pick the external system by overriding ``service_url`` with
    the config key naming that service's base URL.
    """
    decorators = [advertise('scopes', 'rate_limit')]
    scopes = ['user']
    rate_limit = [1000, 60*60*24]
    # Config key naming the external service's URL; overridden in subclasses.
    service_url = 'default'

    @staticmethod
    def upsert_library(service_uid, library):
        """
        Upsert a library into the database. This entails:
          - Adding a library and bibcodes if there is no name conflict
          - Not adding a library if name matches, but compare bibcodes

        :param service_uid: microservice UID of the user
        :param library: dictionary of the form:
            {'name': str, 'description': str, 'documents': [str, ...., str]}

        :return: dict with library_id, name, description, num_added, action
        """
        # Make the permissions
        user = User.query.filter(User.id == service_uid).one()
        try:
            # XXX: is a join faster or slower than this logic? or is it even
            # important in this scenario?

            # Find all the permissions of the user
            permissions = Permissions.query\
                .filter(Permissions.user_id == user.id)\
                .filter(Permissions.owner == True)\
                .all()
            if not permissions:
                raise NoResultFound

            # Collect all the libraries for which they are the owner
            libs = [Library.query.filter(Library.id == permission.library_id).one()
                    for permission in permissions]
            lib = [lib_ for lib_ in libs if lib_.name == library['name']]

            # Raise if there is not exactly one; it should be 1 or 0, but if
            # multiple are returned, there is some problem
            if len(lib) == 0:
                # Log *before* raising - in the original code this log line
                # came after the raise and was unreachable dead code.
                current_app.logger.info(
                    'User does not have a library with this name'
                )
                raise NoResultFound
            elif len(lib) > 1:
                current_app.logger.warning(
                    'More than 1 library has the same name,'
                    ' this should not happen: {}'.format(lib)
                )
                # NOTE(review): IntegrityError's constructor normally
                # requires arguments; raising the bare class here may
                # surface as a TypeError instead - confirm intent.
                raise IntegrityError

            # Get the single record returned, as names are considered unique
            # in the workflow of creating libraries
            lib = lib[0]

            bibcode_before = len(lib.get_bibcodes())
            lib.add_bibcodes(library['documents'])
            bibcode_added = len(lib.get_bibcodes()) - bibcode_before
            action = 'updated'

            db.session.add(lib)

        except NoResultFound:
            # No existing library with this name: create one, truncating the
            # name/description to fit the datastore column sizes.
            current_app.logger.info('Creating library from scratch: {}'
                                    .format(library))
            permission = Permissions(owner=True)
            lib = Library(
                name=library['name'][0:50],
                description=library['description'][0:200],
            )
            lib.add_bibcodes(library['documents'])

            lib.permissions.append(permission)
            user.permissions.append(permission)
            db.session.add_all([lib, permission, user])
            bibcode_added = len(lib.get_bibcodes())
            action = 'created'

        db.session.commit()

        return {
            'library_id': BaseView.helper_uuid_to_slug(lib.id),
            'name': lib.name,
            'description': lib.description,
            'num_added': bibcode_added,
            'action': action
        }

    # Methods
    def get(self):
        """
        HTTP GET request that imports the user's libraries from the external
        service and upserts each into the local database.

        :return: list of per-library result dicts and HTTP 200, or the
            upstream error payload/status if the external call fails.

        Header:
        Must contain the API forwarded user ID of the user accessing the end
        point

        Post body:
        ----------
        No post content accepted.

        Permissions:
        -----------
        The following type of user can read a library:
          - user scope (authenticated via the API)
        """
        # Check that they pass a user id
        try:
            user = self.helper_get_user_id()
        except KeyError:
            return err(MISSING_USERNAME_ERROR)

        service_uid = self.helper_absolute_uid_to_service_uid(absolute_uid=user)

        url = '{external_service}/{user_id}'.format(
            external_service=current_app.config[self.service_url],
            user_id=user
        )

        current_app.logger.info('Collecting libraries for user {} from {}'
                                .format(user, url))
        response = client().get(url)

        # Forward any upstream failure unchanged.
        if response.status_code != 200:
            return response.json(), response.status_code

        resp = []
        for library in response.json()['libraries']:
            resp.append(self.upsert_library(service_uid=service_uid,
                                            library=library))

        return resp, 200
class ClassicView(HarbourView):
    """Import libraries from ADS Classic into the local database."""

    decorators = [advertise('scopes', 'rate_limit')]
    scopes = ['user']
    rate_limit = [1000, 24 * 60 * 60]
    service_url = 'BIBLIB_CLASSIC_SERVICE_URL'
class TwoPointOhView(HarbourView):
    """Import libraries from ADS 2.0 into the local database."""

    decorators = [advertise('scopes', 'rate_limit')]
    scopes = ['user']
    rate_limit = [1000, 24 * 60 * 60]
    service_url = 'BIBLIB_TWOPOINTOH_SERVICE_URL'
| {
"content_hash": "afea771db352bc3444e1b6e0a4625788",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 114,
"avg_line_length": 31.836158192090394,
"alnum_prop": 0.5669920141969832,
"repo_name": "jonnybazookatone/biblib-service",
"id": "d0f98ea7f0bb8374f96b6a68f9c8ea101073a6e3",
"size": "5635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biblib/views/classic_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Puppet",
"bytes": "1258"
},
{
"name": "Python",
"bytes": "415818"
},
{
"name": "Shell",
"bytes": "533"
}
],
"symlink_target": ""
} |
"""Tests for base_inference_runner."""
from __future__ import annotations
from typing import Any, List
from absl.testing import absltest
import jax
import numpy as np
from paxml import base_inference_runner
from praxis import base_hyperparams
from praxis import base_layer
from praxis import base_model
from praxis import py_utils
from praxis import pytypes
from praxis import test_utils
from praxis import train_states
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
# Short aliases for commonly used praxis symbols.
instantiate = base_hyperparams.instantiate
NestedMap = py_utils.NestedMap
NestedWeightHParams = base_layer.NestedWeightHParams
PRNGKey = pytypes.PRNGKey
TrainState = train_states.TrainState
class DummyInference(base_inference_runner.BaseInferenceRunner):
  """Test double that returns canned outputs configured via HParams."""

  class HParams(base_inference_runner.BaseInferenceRunner.HParams):
    # Canned value returned by infer() and the matching TFDS feature schema.
    output: Any = None
    output_schema: Any = None

  def infer(self, train_state: TrainState, prng_key: PRNGKey,
            var_weight_hparams: NestedWeightHParams,
            input_batch: NestedMap) -> NestedMap:
    """Ignores all inputs and returns the configured canned output."""
    return self.hparams.output

  @property
  def output_schema(self) -> NestedMap:
    # Schema describing one example of the canned output.
    return self.hparams.output_schema
class BaseInferenceRunnerTest(test_utils.TestCase):

  def test_infer(self):
    """Serialized infer() outputs must round-trip through the TFDS schema."""
    # Canned batch of 8 examples: a float tensor plus nested UTF-8 text.
    dummy_output = NestedMap(
        tensor=np.arange(64, dtype=np.float32).reshape(8, 8),
        nested=NestedMap(
            text=np.array([f'{i}'.encode('utf-8') for i in range(8)],
                          dtype=object)))
    # Matching per-example TFDS feature schema.
    dummy_schema = NestedMap(
        tensor=tfds.features.Tensor(shape=(8,), dtype=tf.float32),
        nested=NestedMap(text=tfds.features.Text()))
    infer_runner_p = DummyInference.HParams(
        output=dummy_output, output_schema=dummy_schema)
    infer_runner = infer_runner_p.Instantiate(model=None)

    serialized_outputs = infer_runner.serialize_outputs(
        # Pass dummy values to all 4 arguments of infer().
        infer_runner.infer(*([None] * 4)))
    # Expect one serialized record per example in the batch.
    expected_outputs: List[NestedMap] = py_utils.tree_unstack(dummy_output, 0)
    self.assertEqual(len(serialized_outputs), len(expected_outputs))

    # Deserialize each record and compare leaf-by-leaf with the original.
    features_dict = tfds.features.FeaturesDict(dummy_schema)
    for serialized, expected in zip(serialized_outputs, expected_outputs):
      output = features_dict.deserialize_example(serialized)
      output_np = jax.tree_map(lambda x: x.numpy(), output)
      for output_leaf, expected_leaf in zip(
          jax.tree_util.tree_leaves(output_np),
          jax.tree_util.tree_leaves(expected)):
        self.assertArraysEqual(output_leaf, expected_leaf)
if __name__ == '__main__':
  # Standard absltest entry point.
  absltest.main()
| {
"content_hash": "99914873188e71a9f037016a36c96f9e",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 78,
"avg_line_length": 32.848101265822784,
"alnum_prop": 0.7148362235067437,
"repo_name": "google/paxml",
"id": "008b8e53e091a1981d83c37bb25c4cda44723be5",
"size": "3186",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "paxml/base_inference_runner_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7552"
},
{
"name": "Python",
"bytes": "916641"
},
{
"name": "Shell",
"bytes": "11373"
},
{
"name": "Starlark",
"bytes": "42602"
}
],
"symlink_target": ""
} |
from cStringIO import StringIO
from pyglet.media import avbin
av = avbin.av
from ctypes import *
class URLContext(Structure):
    # ctypes mirror of the C URLContext struct used by avbin; field order
    # and types must match the C definition exactly.
    _fields_ = [
        ("prot", c_void_p),
        ("flags", c_int),
        ("is_streamed", c_int),
        ("max_packet_size", c_int),
        ("priv_data", c_void_p),
        ("filename", c_char_p)
    ]
# C callback signatures matching the URLProtocol function-pointer fields.
URL_OPEN = CFUNCTYPE(c_int, POINTER(URLContext), c_char_p, c_int)
URL_READ = CFUNCTYPE(c_int, POINTER(URLContext), c_void_p, c_int)
URL_WRITE = CFUNCTYPE(c_int, POINTER(URLContext), c_void_p, c_int)
URL_SEEK = CFUNCTYPE(c_int64, POINTER(URLContext), c_int64, c_int)
URL_CLOSE = CFUNCTYPE(c_int, POINTER(URLContext))
URL_READ_PAUSE = CFUNCTYPE(c_int, POINTER(URLContext), c_int)
URL_READ_SEEK = CFUNCTYPE(c_int64, POINTER(URLContext), c_int, c_int64, c_int)
URL_GET_FILE_HANDLE = CFUNCTYPE(c_int, POINTER(URLContext))
class URLProtocol(Structure):
    # ctypes mirror of the C URLProtocol struct. Declared empty first so
    # the self-referential "next" pointer below can reference the class.
    pass

URLProtocol._fields_ = [
    ("name", c_char_p),
    ("url_open", URL_OPEN),
    ("url_read", URL_READ),
    ("url_write", URL_WRITE),
    ("url_seek", URL_SEEK),
    ("url_close", URL_CLOSE),
    ("next", POINTER(URLProtocol)),
    ("url_read_pause", URL_READ_PAUSE),
    ("url_read_seek", URL_READ_SEEK),
    ("url_get_file_handle", URL_GET_FILE_HANDLE),
]
class ProtocolPrivateData(Structure):
    # Wraps a Python file-like object so it can travel through the C
    # layer's priv_data void pointer.
    _fields_ = [
        ('data', py_object)
    ]
# Instantiate our custom protocol and give it the scheme name 'mem'.
memory_protocol = URLProtocol()
data = create_string_buffer('mem')
name = c_char_p(addressof(data))
memory_protocol.name = name

# Keep-alive list for per-context private data, plus the handoff slot used
# to pass a Python file object from AVbinMemorySource into url_open.
privates = []
current_file = None
def url_open(context, path, flags):
    """url_open callback: attach the pending file object to the context.

    The file to open is handed over via the module-level ``current_file``
    global (set by AVbinMemorySource) because the C API only passes a
    filename string.
    """
    private_data = ProtocolPrivateData()
    global current_file
    private_data.data = current_file
    current_file = None
    # Keep a Python reference alive: only the raw address is stored in
    # priv_data, which would not keep the struct from being collected.
    privates.append(private_data)
    context.contents.priv_data = cast(pointer(private_data), c_void_p)
    return 0
def url_read(context, buffer, size):
    """url_read callback: copy up to ``size`` bytes into ``buffer``."""
    stream = ProtocolPrivateData.from_address(context.contents.priv_data).data
    chunk = stream.read(size)
    count = len(chunk)
    memmove(buffer, chunk, count)
    return count
def url_seek(context, pos, whence):
    """url_seek callback; also services AVSEEK_SIZE size queries."""
    io = ProtocolPrivateData.from_address(context.contents.priv_data).data
    if whence == 0x10000: # AVSEEK_SIZE
        # Size query: measure by seeking to EOF, then restore the position.
        pos = io.tell()
        io.seek(0, 2)
        size = io.tell()
        io.seek(pos)
        return size
    io.seek(pos, whence)
    return io.tell()
def url_close(context):
    """url_close callback: drop our keep-alive reference for this context."""
    target = addressof(
        ProtocolPrivateData.from_address(context.contents.priv_data))
    for kept in privates:
        if addressof(kept) == target:
            privates.remove(kept)
            break
    return 0
# Wrap the Python callbacks in ctypes function pointers (the struct holds
# the references, keeping them alive) and register the protocol with avbin.
memory_protocol.url_open = URL_OPEN(url_open)
memory_protocol.url_read = URL_READ(url_read)
memory_protocol.url_seek = URL_SEEK(url_seek)
memory_protocol.url_close = URL_CLOSE(url_close)
av.register_protocol(byref(memory_protocol))
class AVbinMemorySource(avbin.AVbinSource):
    """AVbinSource that can load media from an in-memory file-like object."""

    def __init__(self, filename, file = None):
        if file is not None:
            # Route the open through the registered 'mem' protocol; the
            # file object is smuggled to url_open via the module global.
            filename = 'mem:'
            global current_file
            current_file = file
        return super(AVbinMemorySource, self).__init__(filename)
avbin.AVbinSource = AVbinMemorySource
if __name__ == '__main__':
    # Smoke test: stream an Ogg file through the in-memory protocol.
    from pyglet.media import load
    foo = load(None, open("test.ogg", 'rb'),
               streaming = True)
    foo.play()

    import pyglet
    pyglet.app.run()
| {
"content_hash": "df3b4e9986775bf562dcce299dde2736",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 29.371681415929203,
"alnum_prop": 0.6456764085567942,
"repo_name": "fos/fos-legacy",
"id": "5e40aa7366abe4344b8ce306cd294e843e1ec0d2",
"size": "3319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scratch/mem_load.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "7294"
},
{
"name": "Erlang",
"bytes": "2662"
},
{
"name": "Haskell",
"bytes": "1973"
},
{
"name": "JavaScript",
"bytes": "432354"
},
{
"name": "Python",
"bytes": "1231025"
}
],
"symlink_target": ""
} |
from tartist.app import gan
from tartist.app.gan import GANGraphKeys
from tartist.core import get_env, get_logger
from tartist.core.utils.naming import get_dump_directory, get_data_directory
from tartist.nn import opr as O, optimizer, summary
logger = get_logger(__file__)

# Experiment configuration, read elsewhere via get_env('dir.*'/'trainer.*').
__envs__ = {
    'dir': {
        'root': get_dump_directory(__file__),
        'data': get_data_directory('WellKnown/mnist')
    },
    'trainer': {
        'learning_rate': 0.001,
        'batch_size': 100,
        'epoch_size': 500,
        'nr_epochs': 100,
        'env_flags': {
            'log_device_placement': False
        }
    },
}

# Trainer classes picked up by the TensorArtist runner.
__trainer_cls__ = gan.GANTrainer
__trainer_env_cls__ = gan.GANTrainerEnv
def make_network(env):
    """Builds the MLP GAN (generator + discriminator) for MNIST.

    In the training phase both players' losses and accuracies are added as
    outputs; at inference time only the generated image and its
    discriminator score are exposed.
    """
    with env.create_network() as net:
        code_length = 20          # dimensionality of the latent code z
        h, w, c = 28, 28, 1       # MNIST image geometry
        is_train = env.phase == env.Phase.TRAIN

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def forward(img):
                # One latent sample per generated image.
                g_batch_size = get_env('trainer.batch_size') if env.phase is env.Phase.TRAIN else 1
                # NOTE(review): ``tf`` is not imported anywhere in this
                # module, so this line raises NameError when executed -
                # confirm whether ``import tensorflow as tf`` is missing.
                z = O.as_varnode(tf.random_normal([g_batch_size, code_length]))

                # Generator: z -> 28x28x1 image in [0, 1] (sigmoid output).
                with env.variable_scope(GANGraphKeys.GENERATOR_VARIABLES):
                    _ = z
                    with O.argscope(O.fc, nonlin=O.tanh):
                        _ = O.fc('fc1', _, 500)
                    _ = O.fc('fc3', _, 784, nonlin=O.sigmoid)
                    x_given_z = _.reshape(-1, 28, 28, 1)

                # Discriminator MLP producing a single real/fake logit.
                def discriminator(x):
                    _ = x
                    with O.argscope(O.fc, nonlin=O.tanh):
                        _ = O.fc('fc1', _, 500)
                    _ = O.fc('fc3', _, 1)
                    logits = _
                    return logits

                # Score real images (training only)...
                if is_train:
                    with env.variable_scope(GANGraphKeys.DISCRIMINATOR_VARIABLES):
                        logits_real = discriminator(img).flatten()
                        score_real = O.sigmoid(logits_real)
                # ...and generated images, sharing weights when training.
                with env.variable_scope(GANGraphKeys.DISCRIMINATOR_VARIABLES, reuse=is_train):
                    logits_fake = discriminator(x_given_z).flatten()
                    score_fake = O.sigmoid(logits_fake)

                if is_train:
                    # build loss: standard non-saturating GAN cross-entropy.
                    with env.variable_scope('loss'):
                        d_loss_real = O.sigmoid_cross_entropy_with_logits(
                            logits=logits_real, labels=O.ones_like(logits_real)).mean()
                        d_loss_fake = O.sigmoid_cross_entropy_with_logits(
                            logits=logits_fake, labels=O.zeros_like(logits_fake)).mean()
                        g_loss = O.sigmoid_cross_entropy_with_logits(
                            logits=logits_fake, labels=O.ones_like(logits_fake)).mean()

                        # Accuracies at the 0.5 decision threshold.
                        d_acc_real = (score_real > 0.5).astype('float32').mean()
                        d_acc_fake = (score_fake < 0.5).astype('float32').mean()
                        g_accuracy = (score_fake > 0.5).astype('float32').mean()

                        d_accuracy = .5 * (d_acc_real + d_acc_fake)
                        d_loss = .5 * (d_loss_real + d_loss_fake)

                    dpc.add_output(d_loss, name='d_loss', reduce_method='sum')
                    dpc.add_output(d_accuracy, name='d_accuracy', reduce_method='sum')
                    dpc.add_output(d_acc_real, name='d_acc_real', reduce_method='sum')
                    dpc.add_output(d_acc_fake, name='d_acc_fake', reduce_method='sum')
                    dpc.add_output(g_loss, name='g_loss', reduce_method='sum')
                    dpc.add_output(g_accuracy, name='g_accuracy', reduce_method='sum')

                dpc.add_output(x_given_z, name='output')
                dpc.add_output(score_fake, name='score')

            dpc.set_input_maker(inputs).set_forward_func(forward)

            # Route the accuracy summaries to each player's collection.
            if is_train:
                for acc in ['d_accuracy', 'd_acc_real', 'd_acc_fake']:
                    summary.scalar(acc, dpc.outputs[acc], collections=[GANGraphKeys.DISCRIMINATOR_SUMMARIES])
                summary.scalar('g_accuracy', dpc.outputs['g_accuracy'], collections=[GANGraphKeys.GENERATOR_SUMMARIES])

        net.add_all_dpc_outputs(dpc)
def make_optimizer(env):
    """Creates one Adam optimizer wrapper shared by both GAN players."""
    learning_rate = optimizer.base.make_optimizer_variable(
        'learning_rate', get_env('trainer.learning_rate'))

    wrapper = optimizer.OptimizerWrapper()
    wrapper.set_base_optimizer(optimizer.base.AdamOptimizer(learning_rate))
    # Variables matching '*/b' (presumably biases - confirm naming scheme)
    # get a 2x learning-rate multiplier.
    wrapper.append_grad_modifier(optimizer.grad_modifier.LearningRateMultiplier([
        ('*/b', 2.0),
    ]))
    env.set_g_optimizer(wrapper)
    env.set_d_optimizer(wrapper)
from data_provider_gan_mnist import *
def main_train(trainer):
    """Attaches standard training plugins and runs the trainer.

    Plugins enabled: summary history/echo, epoch progress and snapshot
    saving. Imports are local so the plugins load only when training runs.
    """
    from tartist.plugins.trainer_enhancer import summary
    summary.enable_summary_history(trainer)
    summary.enable_echo_summary_scalar(trainer)

    from tartist.plugins.trainer_enhancer import progress
    progress.enable_epoch_progress(trainer)

    from tartist.plugins.trainer_enhancer import snapshot
    snapshot.enable_snapshot_saver(trainer)

    # TODO(MJY): does not support inference_runner now
    # from tartist.plugins.trainer_enhancer import inference
    # inference.enable_inference_runner(trainer, make_dataflow_inference)

    trainer.train()
| {
"content_hash": "10c0dc455bbb71df7e47f6dba8ad89a2",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 115,
"avg_line_length": 38.94202898550725,
"alnum_prop": 0.5593598809080759,
"repo_name": "vacancy/TensorArtist",
"id": "99700fac84ab23d9346a3432f90be11cdf4d0d08",
"size": "5544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/generative-model/desc_gan_mnist_mlp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "497134"
},
{
"name": "Shell",
"bytes": "630"
}
],
"symlink_target": ""
} |
"""Shared code for attacks and defenses."""
import abc
import inspect
import os
import numpy as np
import tensorflow as tf
class DefenseModel(abc.ABC):
    """Base class which defines interface for all defense models.

    This base class supports two options for defense model:
    - defense model which only returns predictions of target class.
    - defense model with detector of adversarial examples. Such model
      returns both output of detector and predictions of target class.

    If subclass need to implement model with detector then it should override
    `detect` method. Otherwise `detect` should be kept as is.

    Note that DefenseModel only provides method for forward pass and does
    not provide method to compute gradients. This is intentional and it's done
    because gradients for most of the defenses in the tutorials are not useful
    for adversarial examples search.
    """

    # Threshold for detecting if an input is adversarial, by default 1/2.
    # (Was a bare-string "attribute docstring"; a comment is the
    # conventional way to document a class attribute.)
    threshold = 0.5

    @abc.abstractmethod
    def classify(self, x):
        """Performs forward pass on the model.

        Args:
          x: input as either numpy array or Tensorflow Tensor with shape
            [B, H, W, C], each value of the input should be float in [0, 1]
            range.

        Returns:
          NumPy array with normalized probabilities returned by the model.
          Shape of return value is [B, NUM_LABELS].

        This method has to be implemented in the subclass.
        """
        pass

    def detect(self, x):
        """Performs forward pass on the adversarial detector.

        Args:
          x: input as either numpy array or Tensorflow Tensor with shape
            [B, H, W, C], each value of the input should be float in [0, 1]
            range.

        Returns:
          NumPy array with shape [B] of floats from [-inf, inf] with a higher
          value when the input is more likely to be adversarial, and should
          be rejected.

        If your model does not provide a detector then leave it as is.
        If your model provides a detector then override this method in the
        subclass.
        """
        return np.zeros(x.shape[0])

    def __call__(self, x):
        """Runs both classification and detection on the input.

        Args:
          x: numpy array with shape [B, H, W, C] or [H, W, C]; each value
            should be float in [0, 1] range. A single [H, W, C] example is
            promoted to a batch of one.

        Returns:
          Tuple ``(predictions, detector_scores)`` where predictions has
          shape [B, NUM_LABELS] (B == 1 for a single input) and
          detector_scores has shape [B].
        """
        assert isinstance(x, np.ndarray)
        assert len(x.shape) in (3, 4)
        if len(x.shape) == 3:
            # Promote a single example to a batch of one. (The previous
            # if/else here had two byte-identical branches - the result is
            # always batched, which the old docstring misstated.)
            x = np.expand_dims(x, axis=0)
        return self.classify(x), self.detect(x)
class Attack(abc.ABC):
    """Interface that every adversarial attack implements.

    The environment creates a fresh attack instance for each adversarial
    example it evaluates.
    """

    def __init__(self, task):
        """Stores the TaskDefinition this attack generates examples for."""
        self.task = task

    @abc.abstractmethod
    def attack(self, model, x, y):
        """Performs adversarial attack on a batch of examples.

        Args:
          model: instance of DefenseModel.
          x: numpy array with batch of input examples, shape [B, H, W, C],
            values in [0, 1] range.
          y: numpy array with shape [B] containing the true labels.

        Returns:
          numpy array with adversarial examples, same shape and type as x.
        """
class NonBatchedAttack(Attack):
    """Base class for attacks implemented one example at a time.

    Subclasses implement `attack_one_example`; `attack` simply loops it over
    the batch. To make evaluation faster, prefer subclassing `Attack`
    directly with a batched implementation whenever that is easy to do.
    """

    def attack(self, model, x, y):
        adv_rows = [
            np.expand_dims(self.attack_one_example(model, example, label),
                           axis=0)
            for example, label in zip(x, y)
        ]
        return np.concatenate(adv_rows, axis=0)

    @abc.abstractmethod
    def attack_one_example(self, model, x, y):
        """Performs adversarial attack on a single example.

        Args:
          model: instance of DefenseModel.
          x: numpy array with input example, shape [H, W, C],
            values in [0, 1] range.
          y: true label of the input example.

        Returns:
          numpy array with adversarial example, same shape and type as x.
        """
class TaskDefinition(abc.ABC):
    """Base class for all task definitions."""

    # NOTE(review): the concrete subclass LpTask.evaluate takes an extra
    # leading ``defense`` argument that is absent here - the signatures
    # should be reconciled; confirm which one callers expect.
    @abc.abstractmethod
    def evaluate(self, example_idx, true_label,
                 src_example, adv_example,
                 src_pred, adv_pred,
                 src_detector, adv_detector):
        """Evaluate attack on one example.

        Args:
          example_idx: index of the example in the dataset.
            Could be useful if certain examples in the dataset need to be
            skipped or require special handling.
          true_label: true label of the example.
          src_example: numpy array with source example, shape [H, W, C].
          adv_example: numpy array with adversarial example, shape [H, W, C].
          src_pred: probabilities vector with model prediction on source example.
          adv_pred: probabilities vector with model prediction on adversarial example.
          src_detector: probability that input is adversarial as returned by detector on source example.
          adv_detector: probability that input is adversarial as returned by detector on adversarial example.

        Returns:
          success: True is task passed on the example, False otherwise.
          reason: If task failed then the reason why.

        This method should verify following:
          - generated adversarial example is within required distance from the original example
          - class label was flipped on adversarial example compared to original example
          - detector returns low probability of adversarial example being adversarial (if detector is available)
        If all conditions are met then method should return True.
        If any condition is not satisfied then method should return False.
        """
        pass
class LpTask(TaskDefinition):
    """Task definition for adversarial examples in L_{p} ball."""

    def __init__(self, norm, threshold):
        """Initializes the Lp task definition.

        Args:
          norm: norm type, interpreted like the `ord` argument of
            `np.linalg.norm` when used for vectors.
          threshold: maximum allowed Lp distortion between source and
            adversarial example.
        """
        self.norm = norm
        self.threshold = threshold

    def evaluate(self, defense, example_idx, true_label,
                 src_example, adv_example,
                 src_pred, adv_pred,
                 src_detector, adv_detector):
        """Checks misclassification, the distortion bound, and the detector."""
        # The attack succeeds only if the predicted label flipped.
        predicted = np.argmax(adv_pred)
        if predicted == true_label:
            return False, "Label {} matches true label {}".format(predicted, true_label)

        # The perturbation must stay inside the Lp ball (small float slack).
        delta = (src_example - adv_example).flatten()
        distortion = np.linalg.norm(delta, ord=self.norm)
        if distortion > self.threshold + 1e-3:
            return False, "Distortion {} exceeds bound {}".format(distortion, self.threshold)

        # The defense's detector must not reject the adversarial example.
        if adv_detector > defense.threshold:
            return False, "Adversarial example rejected by detector with score {}.".format(adv_detector)

        return True, None
def get_checkpoint_abs_path(rel_path):
    """Converts relative path of checkpoint-related resource to absolute path.

    Args:
      rel_path: path of the file relative to repository root.

    Returns:
      Absolute path of the file, resolved against the tutorials root - the
      parent of the directory containing this module. E.g. if this file is
      `/path/to/tutorials/common/framework.py`, then
      `get_checkpoint_abs_path('checkpoints/model-1')` returns
      `/path/to/tutorials/checkpoints/model-1`.
    """
    this_file = inspect.getfile(inspect.currentframe())
    this_dir = os.path.dirname(os.path.abspath(this_file))
    repo_root = os.path.abspath(os.path.join(this_dir, os.pardir))
    return os.path.join(repo_root, rel_path)
| {
"content_hash": "321fdf5f78fbc3c908da2a062d73bc39",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 111,
"avg_line_length": 38.90041493775934,
"alnum_prop": 0.6437333333333334,
"repo_name": "google-research/selfstudy-adversarial-robustness",
"id": "95a3e5205598f1bc3481273a853b9f67a418d27c",
"size": "9951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/framework.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "161383"
},
{
"name": "Shell",
"bytes": "10197"
}
],
"symlink_target": ""
} |
# NOTE(review): distutils.core.setup ignores ``install_requires``; switching
# to ``from setuptools import setup`` would make the dependency list
# effective - confirm before publishing.
from distutils.core import setup

# Package metadata.
PACKAGE = "paypal_reporter"
NAME = "paypal_reporter"
DESCRIPTION = "A library to generate reports about activity in a PayPal account"
AUTHOR = "Itai Shirav"
AUTHOR_EMAIL = "itai@platonix.com"
URL = "https://github.com/ishirav/paypal-reporter"
# Single source of truth for the version: the package's __version__.
VERSION = __import__(PACKAGE).__version__

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="MIT",
    url=URL,
    packages=['paypal_reporter'],
    install_requires =['requests>=1.0', 'tabulate'],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
    ],
)
"content_hash": "4884602f7e9c8dbc257e5c39274481ee",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 29.933333333333334,
"alnum_prop": 0.6536748329621381,
"repo_name": "ishirav/paypal-reporter",
"id": "015f3f53f12a7dd77b89a3a98065f2cb40bb5251",
"size": "898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4625"
}
],
"symlink_target": ""
} |
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerSession](https://docs.talon.one/integration-api/#operation/updateCustomerSessionV2) endpoint is `https://mycompany.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class LoyaltyProjectionData(object):
    """Projected loyalty-program balance data for a single future date.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    openapi_types = {
        'date': 'datetime',
        'expiring_points': 'float',
        'activating_points': 'float',
        'projected_balance': 'float'
    }

    attribute_map = {
        'date': 'date',
        'expiring_points': 'expiringPoints',
        'activating_points': 'activatingPoints',
        'projected_balance': 'projectedBalance'
    }

    def __init__(self, date=None, expiring_points=None, activating_points=None, projected_balance=None, local_vars_configuration=None):  # noqa: E501
        """LoyaltyProjectionData - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        # Backing fields; filled in below through the validating setters.
        self._date = None
        self._expiring_points = None
        self._activating_points = None
        self._projected_balance = None
        self.discriminator = None

        self.date = date
        self.expiring_points = expiring_points
        self.activating_points = activating_points
        self.projected_balance = projected_balance

    @property
    def date(self):
        """Gets the date of this LoyaltyProjectionData.  # noqa: E501

        Specific date of projection.  # noqa: E501

        :return: The date of this LoyaltyProjectionData.  # noqa: E501
        :rtype: datetime
        """
        return self._date

    @date.setter
    def date(self, date):
        """Sets the date of this LoyaltyProjectionData.

        Specific date of projection.  # noqa: E501

        :param date: The date of this LoyaltyProjectionData.  # noqa: E501
        :type: datetime
        """
        validating = self.local_vars_configuration.client_side_validation
        if validating and date is None:
            raise ValueError("Invalid value for `date`, must not be `None`")  # noqa: E501
        self._date = date

    @property
    def expiring_points(self):
        """Gets the expiring_points of this LoyaltyProjectionData.  # noqa: E501

        Points that will be expired by the specified date.  # noqa: E501

        :return: The expiring_points of this LoyaltyProjectionData.  # noqa: E501
        :rtype: float
        """
        return self._expiring_points

    @expiring_points.setter
    def expiring_points(self, expiring_points):
        """Sets the expiring_points of this LoyaltyProjectionData.

        Points that will be expired by the specified date.  # noqa: E501

        :param expiring_points: The expiring_points of this LoyaltyProjectionData.  # noqa: E501
        :type: float
        """
        validating = self.local_vars_configuration.client_side_validation
        if validating and expiring_points is None:
            raise ValueError("Invalid value for `expiring_points`, must not be `None`")  # noqa: E501
        self._expiring_points = expiring_points

    @property
    def activating_points(self):
        """Gets the activating_points of this LoyaltyProjectionData.  # noqa: E501

        Points that will be active by the specified date.  # noqa: E501

        :return: The activating_points of this LoyaltyProjectionData.  # noqa: E501
        :rtype: float
        """
        return self._activating_points

    @activating_points.setter
    def activating_points(self, activating_points):
        """Sets the activating_points of this LoyaltyProjectionData.

        Points that will be active by the specified date.  # noqa: E501

        :param activating_points: The activating_points of this LoyaltyProjectionData.  # noqa: E501
        :type: float
        """
        validating = self.local_vars_configuration.client_side_validation
        if validating and activating_points is None:
            raise ValueError("Invalid value for `activating_points`, must not be `None`")  # noqa: E501
        self._activating_points = activating_points

    @property
    def projected_balance(self):
        """Gets the projected_balance of this LoyaltyProjectionData.  # noqa: E501

        Current balance plus projected active points, minus expiring points.  # noqa: E501

        :return: The projected_balance of this LoyaltyProjectionData.  # noqa: E501
        :rtype: float
        """
        return self._projected_balance

    @projected_balance.setter
    def projected_balance(self, projected_balance):
        """Sets the projected_balance of this LoyaltyProjectionData.

        Current balance plus projected active points, minus expiring points.  # noqa: E501

        :param projected_balance: The projected_balance of this LoyaltyProjectionData.  # noqa: E501
        :type: float
        """
        validating = self.local_vars_configuration.client_side_validation
        if validating and projected_balance is None:
            raise ValueError("Invalid value for `projected_balance`, must not be `None`")  # noqa: E501
        self._projected_balance = projected_balance

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize one level of nested models inside lists.
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize one level of nested models inside dict values.
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, LoyaltyProjectionData)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return (not isinstance(other, LoyaltyProjectionData)
                or self.to_dict() != other.to_dict())
| {
"content_hash": "704faeedca8f8a1b2c9824bdcc4a7e91",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 732,
"avg_line_length": 37.28365384615385,
"alnum_prop": 0.6314635718891038,
"repo_name": "talon-one/talon_one.py",
"id": "51a1d3f22f3ab3d4dd32565bfa6a1de788a4baf0",
"size": "7772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talon_one/models/loyalty_projection_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "5139586"
},
{
"name": "Shell",
"bytes": "1826"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta, tzinfo
import logging
import pytest
from planet import data_filter, exceptions
LOGGER = logging.getLogger(__name__)
def test_and_filter():
    """AndFilter lists its child filters, in order, under 'config'."""
    children = [{'type': 'testf1'}, {'type': 'testf2'}]
    assert data_filter.and_filter(children) == {
        'type': 'AndFilter', 'config': children
    }
def test_or_filter():
    """OrFilter lists its child filters, in order, under 'config'."""
    children = [{'type': 'testf1'}, {'type': 'testf2'}]
    assert data_filter.or_filter(children) == {
        'type': 'OrFilter', 'config': children
    }
def test_not_filter():
    """NotFilter wraps a single child filter under 'config'."""
    child = {'type': 'testf1'}
    assert data_filter.not_filter(child) == {
        'type': 'NotFilter', 'config': child
    }
def test__range_filter_success():
    """The callback is applied to every conditional that was provided."""
    res = data_filter._range_filter('testfilter',
                                    'testfield',
                                    gt='a',
                                    gte=None,
                                    lt='b',
                                    lte='c',
                                    callback=lambda v: v + 'a')
    assert res == {
        'type': 'testfilter',
        'field_name': 'testfield',
        'config': {'gt': 'aa', 'lt': 'ba', 'lte': 'ca'}
    }
def test__range_filter_nocallback():
    """Without a callback, conditional values pass through unchanged."""
    res = data_filter._range_filter('testfilter',
                                    'testfield',
                                    gt='a',
                                    gte=None,
                                    lt='b',
                                    lte='c')
    assert res == {
        'type': 'testfilter',
        'field_name': 'testfield',
        'config': {'gt': 'a', 'lt': 'b', 'lte': 'c'}
    }
def test__range_filter_no_conditionals():
    """Omitting every conditional must raise PlanetError; the callback is
    never invoked."""
    with pytest.raises(exceptions.PlanetError):
        data_filter._range_filter('testfilter',
                                  'testfield',
                                  gt=None,
                                  gte=None,
                                  lt=None,
                                  lte=None,
                                  callback=lambda v: v + 'a')
class TZTest(tzinfo):
    """Minimal tzinfo stub with a whole-hour UTC offset.

    A falsy offset (None or 0) reports no UTC offset at all, which makes
    the datetime behave like a naive one.
    """

    def __init__(self, offset=None):
        super().__init__()
        self.offset = offset

    def utcoffset(self, dt):
        if not self.offset:
            return None
        return timedelta(hours=self.offset)
@pytest.mark.parametrize(
    "dtime,expected",
    [(datetime(2022, 5, 1, 1, 0, 0, 1), '2022-05-01T01:00:00.000001Z'),
     (datetime(2022, 5, 1, 1, 0, 1), '2022-05-01T01:00:01Z'),
     (datetime(2022, 6, 1, 1, 1), '2022-06-01T01:01:00Z'),
     (datetime(2022, 6, 1, 1), '2022-06-01T01:00:00Z'),
     (datetime(2022, 6, 1, 1, tzinfo=TZTest(0)), '2022-06-01T01:00:00Z'),
     (datetime(2022, 6, 1, 1, tzinfo=TZTest(1)), '2022-06-01T01:00:00+01:00')])
def test__datetime_to_rfc3339_basic(dtime, expected):
    """Datetimes serialize to RFC 3339; naive values get a 'Z' suffix."""
    formatted = data_filter._datetime_to_rfc3339(dtime)
    assert formatted == expected
def test_date_range_filter_success():
    """Datetime bounds are rendered as RFC 3339 strings in the config."""
    res = data_filter.date_range_filter('testfield',
                                        gt=datetime(2022, 6, 1, 1),
                                        lt=datetime(2022, 7, 1, 1))
    assert res == {
        'type': 'DateRangeFilter',
        'field_name': 'testfield',
        'config': {'gt': '2022-06-01T01:00:00Z', 'lt': '2022-07-01T01:00:00Z'}
    }
def test_range_filter_noconditionals():
    """A range filter with no bounds at all is rejected."""
    with pytest.raises(exceptions.PlanetError):
        data_filter.range_filter('acquired')
def test_range_filter_success():
    """Numeric bounds appear verbatim in the RangeFilter config."""
    expected = {
        'type': 'RangeFilter',
        'field_name': 'testfield',
        'config': {'gt': 0.1, 'lt': 0.9}
    }
    assert data_filter.range_filter('testfield', gt=0.1, lt=0.9) == expected
def test_date_range_filter_noconditionals():
    """A date range filter with no bounds at all is rejected."""
    with pytest.raises(exceptions.PlanetError):
        data_filter.date_range_filter('acquired')
def test_update_filter_success():
    """UpdateFilter serializes its datetime bound as RFC 3339."""
    res = data_filter.update_filter('testfield', gt=datetime(2022, 6, 1, 1))
    assert res == {
        'type': 'UpdateFilter',
        'field_name': 'testfield',
        'config': {'gt': '2022-06-01T01:00:00Z'}
    }
def test_update_filter_noconditionals():
    """An update filter with no bounds at all is rejected."""
    with pytest.raises(exceptions.PlanetError):
        data_filter.update_filter('acquired')
@pytest.mark.parametrize("geom_fixture",
                         [('geom_geojson'), ('feature_geojson'),
                          ('featurecollection_geojson')])
def test_geometry_filter(geom_fixture, request, geom_geojson):
    """Every GeoJSON flavor normalizes to the bare geometry in the filter."""
    geom = request.getfixturevalue(geom_fixture)
    assert data_filter.geometry_filter(geom) == {
        'type': 'GeometryFilter',
        'field_name': 'geometry',
        'config': geom_geojson
    }
def test_number_in_filter():
    """Duplicate values are preserved in a NumberInFilter config."""
    assert data_filter.number_in_filter('testfield', [3, 3]) == {
        'type': 'NumberInFilter', 'field_name': 'testfield', 'config': [3, 3]
    }
def test_string_in_filter():
    """Duplicate values are preserved in a StringInFilter config."""
    assert data_filter.string_in_filter('testfield', ['three', 'three']) == {
        'type': 'StringInFilter',
        'field_name': 'testfield',
        'config': ['three', 'three']
    }
def test_asset_filter():
    """AssetFilter lists the requested asset types under 'config'."""
    assert data_filter.asset_filter(['asset1', 'asset2']) == {
        'type': 'AssetFilter', 'config': ['asset1', 'asset2']
    }
def test_permission_filter():
    """The default permission filter requires download access."""
    assert data_filter.permission_filter() == {
        'type': 'PermissionFilter', 'config': ['assets:download']
    }
def test_std_quality_filter():
    """The standard-quality helper filters on quality_category."""
    assert data_filter.std_quality_filter() == {
        'type': 'StringInFilter',
        'field_name': 'quality_category',
        'config': ['standard']
    }
| {
"content_hash": "4ea5db055288b54e60f88934a0169bfb",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 28.793427230046948,
"alnum_prop": 0.5225827490624491,
"repo_name": "planetlabs/planet-client-python",
"id": "4e173a2159f3920ea6d055cb0ef768778ba4e9db",
"size": "6747",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/test_data_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "373344"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import cms.models.fields
def _placeholder_example(name):
    """CreateModel operation shared by Example1-Example4: four char fields
    plus a single nullable placeholder."""
    return migrations.CreateModel(
        name=name,
        fields=[
            ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ('char_1', models.CharField(max_length=255, verbose_name='char_1')),
            ('char_2', models.CharField(max_length=255, verbose_name='char_2')),
            ('char_3', models.CharField(max_length=255, verbose_name='char_3')),
            ('char_4', models.CharField(max_length=255, verbose_name='char_4')),
            ('placeholder', cms.models.fields.PlaceholderField(to='cms.Placeholder', null=True, slotname=b'placeholder')),
        ],
        options={
        },
        bases=(models.Model,),
    )


class Migration(migrations.Migration):
    """Initial schema for the placeholderapp test models."""

    operations = [
        _placeholder_example('Example1'),
        _placeholder_example('Example2'),
        _placeholder_example('Example3'),
        _placeholder_example('Example4'),
        # Example5 differs: it carries two placeholders with related names.
        migrations.CreateModel(
            name='Example5',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('char_1', models.CharField(max_length=255, verbose_name='char_1')),
                ('char_2', models.CharField(max_length=255, verbose_name='char_2')),
                ('char_3', models.CharField(max_length=255, verbose_name='char_3')),
                ('char_4', models.CharField(max_length=255, verbose_name='char_4')),
                ('placeholder_1', cms.models.fields.PlaceholderField(related_name='p1', slotname=b'placeholder_1', to='cms.Placeholder', null=True)),
                ('placeholder_2', cms.models.fields.PlaceholderField(related_name='p2', slotname=b'placeholder_2', to='cms.Placeholder', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "ae95dc229b40a2192f0c360a494a339e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 149,
"avg_line_length": 51.32098765432099,
"alnum_prop": 0.5556891989415443,
"repo_name": "pbs/django-cms",
"id": "d3545e860a5a65fcb6f87c6e4be0db8902505ffe",
"size": "4181",
"binary": false,
"copies": "1",
"ref": "refs/heads/support/2.3.x",
"path": "cms/test_utils/project/placeholderapp/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "108603"
},
{
"name": "HTML",
"bytes": "289317"
},
{
"name": "JavaScript",
"bytes": "657946"
},
{
"name": "PHP",
"bytes": "4430"
},
{
"name": "Python",
"bytes": "2151038"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration adding group support to courses.Course.

    Adds two columns to the ``courses_course`` table:
      - ``has_groups``: whether the course uses work groups (default False).
      - ``group_max_size``: maximum members per group (default 8).
    """

    def forwards(self, orm):
        # Adding field 'Course.has_groups'
        db.add_column('courses_course', 'has_groups',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Course.group_max_size'
        db.add_column('courses_course', 'group_max_size',
                      self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=8),
                      keep_default=False)

    def backwards(self, orm):
        # Reverse of forwards(): drop both columns added above.
        # Deleting field 'Course.has_groups'
        db.delete_column('courses_course', 'has_groups')
        # Deleting field 'Course.group_max_size'
        db.delete_column('courses_course', 'group_max_size')

    # South's frozen ORM snapshot at the time of this migration.
    # Machine-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254'})
        },
        'badges.alignment': {
            'Meta': {'object_name': 'Alignment'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'badges.badge': {
            'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Badge'},
            'alignments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'alignments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Alignment']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'criteria': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'tags'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Tag']"}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'badges.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'courses.announcement': {
            'Meta': {'ordering': "('-datetime',)", 'object_name': 'Announcement'},
            'content': ('tinymce.models.HTMLField', [], {}),
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']", 'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'courses.attachment': {
            'Meta': {'object_name': 'Attachment'},
            'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']"})
        },
        'courses.course': {
            'Meta': {'ordering': "['order']", 'object_name': 'Course'},
            'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'completion_badge': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'course'", 'null': 'True', 'to': "orm['badges.Badge']"}),
            'created_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'courses_created_of'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['courses.Course']"}),
            'description': ('tinymce.models.HTMLField', [], {}),
            'ects': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'enrollment_method': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '200'}),
            'estimated_effort': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'forum_slug': ('django.db.models.fields.CharField', [], {'max_length': '350', 'null': 'True', 'blank': 'True'}),
            'group_max_size': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
            'has_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hashtag': ('django.db.models.fields.CharField', [], {'default': "'Hashtag'", 'max_length': '128'}),
            'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'intended_audience': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'is_activity_clonable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['courses.Language']", 'symmetrical': 'False'}),
            'learning_goals': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'max_mass_emails_month': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'courses_as_owner'", 'to': "orm['auth.User']"}),
            'promotion_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'promotion_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'requirements': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'static_page': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['courses.StaticPage']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
            'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'courses_as_student'", 'blank': 'True', 'through': "orm['courses.CourseStudent']", 'to': "orm['auth.User']"}),
            'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses_as_teacher'", 'symmetrical': 'False', 'through': "orm['courses.CourseTeacher']", 'to': "orm['auth.User']"}),
            'threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'thumbnail_alt': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'courses.coursestudent': {
            'Meta': {'object_name': 'CourseStudent'},
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'old_course_status': ('django.db.models.fields.CharField', [], {'default': "'f'", 'max_length': '1'}),
            'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'rate': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'courses.courseteacher': {
            'Meta': {'ordering': "['order']", 'object_name': 'CourseTeacher'},
            'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'courses.knowledgequantum': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('title', 'unit'),)", 'object_name': 'KnowledgeQuantum'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'supplementary_material': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'teacher_comments': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'unit': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Unit']"}),
            'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        },
        'courses.language': {
            'Meta': {'object_name': 'Language'},
            'abbr': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'courses.option': {
            'Meta': {'unique_together': "(('question', 'x', 'y'),)", 'object_name': 'Option'},
            'feedback': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '12'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'optiontype': ('django.db.models.fields.CharField', [], {'default': "'t'", 'max_length': '1'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Question']"}),
            'solution': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '100'}),
            'x': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'y': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
        },
        'courses.question': {
            'Meta': {'object_name': 'Question'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']", 'unique': 'True'}),
            'last_frame': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'solution_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'solution_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            'solution_text': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'use_last_frame': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'courses.staticpage': {
            'Meta': {'object_name': 'StaticPage'},
            'body': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        'courses.transcription': {
            'Meta': {'object_name': 'Transcription'},
            'filename': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']"}),
            'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Language']"}),
            'transcription_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
        },
        'courses.unit': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('title', 'course'),)", 'object_name': 'Unit'},
            'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
            'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'unittype': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['courses']
"content_hash": "1465782f0a411c901cd4d828b561a36a",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 232,
"avg_line_length": 79.79475982532752,
"alnum_prop": 0.5486783779346577,
"repo_name": "GeographicaGS/moocng",
"id": "7b829ef3330a646f724d2e913bacade70059f042",
"size": "18297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moocng/courses/migrations/0050_groupsconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "162701"
},
{
"name": "HTML",
"bytes": "362912"
},
{
"name": "JavaScript",
"bytes": "1911286"
},
{
"name": "Python",
"bytes": "2723710"
},
{
"name": "Shell",
"bytes": "24842"
}
],
"symlink_target": ""
} |
import pytest
from pants_test.backend.jvm.tasks.jvm_compile.rsc.rsc_compile_integration_base import (
RscCompileIntegrationBase,
ensure_compile_rsc_execution_strategy,
)
class RscCompileIntegrationYoutline(RscCompileIntegrationBase):
    """Integration tests for the "youtline" (outline + zinc) rsc compile workflow.

    Each test compiles a fixture project under the outline_and_zinc strategy;
    the decorator re-runs the test for every configured execution strategy.
    """
    @ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.outline_and_zinc)
    def test_basic_binary(self):
        # Smoke test: a binary target with mutually-referencing sources compiles.
        self._testproject_compile("mutual", "bin", "A")
    # Marked flaky: retried once on failure before being reported.
    @pytest.mark.flaky(retries=1)
    @ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.outline_and_zinc)
    def test_public_inference(self):
        self._testproject_compile("public_inference", "public_inference", "PublicInference")
    @ensure_compile_rsc_execution_strategy(
        RscCompileIntegrationBase.outline_and_zinc,
        PANTS_COMPILE_RSC_SCALA_WORKFLOW_OVERRIDE="zinc-only",
    )
    def test_workflow_override(self):
        # With the workflow forced to zinc-only, no outline result is expected.
        self._testproject_compile("mutual", "bin", "A", outline_result=False)
    def test_youtline_hermetic_jvm_options(self):
        self._test_hermetic_jvm_options(self.outline_and_zinc)
| {
"content_hash": "1e084488a0ccb5aca2c29697308e70d1",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 92,
"avg_line_length": 39.592592592592595,
"alnum_prop": 0.7380729653882133,
"repo_name": "wisechengyi/pants",
"id": "c06f125752116575ff8269e6f8d251d7303b6f61",
"size": "1201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/tasks/jvm_compile/rsc/test_rsc_compile_integration_youtline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "6634"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "507948"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7608990"
},
{
"name": "Rust",
"bytes": "1005243"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "105217"
},
{
"name": "Starlark",
"bytes": "489739"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``layout.scene.yaxis.tickformat`` property."""

    def __init__(
        self, plotly_name="tickformat", parent_name="layout.scene.yaxis", **kwargs
    ):
        # Pull the overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "style")
        super(TickformatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| {
"content_hash": "42615baf73e306a72f413ecdf2fcb924",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 82,
"avg_line_length": 34,
"alnum_prop": 0.6029411764705882,
"repo_name": "plotly/python-api",
"id": "129a24f87a8a4d55778e821fef7d492463afb371",
"size": "476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/scene/yaxis/_tickformat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import logging
import logging.config
import constants
# Configure application-wide logging from the INI-style "logging.conf" file.
# The log file path is injected via the "defaults" mapping so the config file
# can reference it as %(logfilename)s inside a handler section.
logging.config.fileConfig("logging.conf", defaults={"logfilename": constants.logfileName})
| {
"content_hash": "5c2bfeb2238d2274d71948fe778dbf99",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 90,
"avg_line_length": 24.5,
"alnum_prop": 0.8163265306122449,
"repo_name": "crash-g/BoardGameBot",
"id": "fa1527e90b1a157c06352eb1882f5ae6189f5316",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/github",
"path": "boardgamebot/init_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70996"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, List, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
# Type of the optional "cls" callback: receives the pipeline response, the
# deserialized body, and the response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer used for URL/query/header formatting; client-side
# validation is disabled because the service performs validation.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    location: str, publisher_name: str, type: str, version: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a single VM extension image version."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # Expand the URL template with the serialized path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}",
    )  # pylint: disable=line-too-long
    path_args = {
        "location": _SERIALIZER.url("location", location, "str"),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, "str"),
        "type": _SERIALIZER.url("type", type, "str"),
        "version": _SERIALIZER.url("version", version, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url = _format_url_section(template, **path_args)
    # Query string and headers.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_types_request(location: str, publisher_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request listing extension image types for a publisher."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # Expand the URL template with the serialized path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types",
    )  # pylint: disable=line-too-long
    path_args = {
        "location": _SERIALIZER.url("location", location, "str"),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url = _format_url_section(template, **path_args)
    # Query string and headers.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_versions_request(
    location: str,
    publisher_name: str,
    type: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    top: Optional[int] = None,
    orderby: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request listing versions of a VM extension image type."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # Expand the URL template with the serialized path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions",
    )  # pylint: disable=line-too-long
    path_args = {
        "location": _SERIALIZER.url("location", location, "str"),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, "str"),
        "type": _SERIALIZER.url("type", type, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url = _format_url_section(template, **path_args)
    # Optional OData query parameters are only sent when supplied.
    if filter is not None:
        _params["$filter"] = _SERIALIZER.query("filter", filter, "str")
    if top is not None:
        _params["$top"] = _SERIALIZER.query("top", top, "int")
    if orderby is not None:
        _params["$orderby"] = _SERIALIZER.query("orderby", orderby, "str")
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class VirtualMachineExtensionImagesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.compute.v2017_03_30.ComputeManagementClient`'s
        :attr:`virtual_machine_extension_images` attribute.
    """
    # Expose the versioned models module on the operations class for convenience.
    models = _models
    def __init__(self, *args, **kwargs):
        # Generated clients pass (client, config, serializer, deserializer)
        # either positionally or as keyword arguments; accept both forms.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def get(
        self, location: str, publisher_name: str, type: str, version: str, **kwargs: Any
    ) -> _models.VirtualMachineExtensionImage:
        """Gets a virtual machine extension image.
        :param location: The name of a supported Azure region. Required.
        :type location: str
        :param publisher_name: Required.
        :type publisher_name: str
        :param type: Required.
        :type type: str
        :param version: Required.
        :type version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualMachineExtensionImage or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionImage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map common HTTP failures to typed exceptions; callers may extend or
        # override the mapping via the "error_map" keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.VirtualMachineExtensionImage]
        # Build the HTTP request from the module-level request builder.
        request = build_get_request(
            location=location,
            publisher_name=publisher_name,
            type=type,
            version=version,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        # Send synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("VirtualMachineExtensionImage", pipeline_response)
        # The optional "cls" callback gets the raw response and deserialized body.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}"} # type: ignore
    @distributed_trace
    def list_types(
        self, location: str, publisher_name: str, **kwargs: Any
    ) -> List[_models.VirtualMachineExtensionImage]:
        """Gets a list of virtual machine extension image types.
        :param location: The name of a supported Azure region. Required.
        :type location: str
        :param publisher_name: Required.
        :type publisher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineExtensionImage or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionImage]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map common HTTP failures to typed exceptions (overridable via kwargs).
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineExtensionImage]]
        request = build_list_types_request(
            location=location,
            publisher_name=publisher_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.list_types.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # "[Type]" asks the deserializer for a list of that model type.
        deserialized = self._deserialize("[VirtualMachineExtensionImage]", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_types.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types"} # type: ignore
    @distributed_trace
    def list_versions(
        self,
        location: str,
        publisher_name: str,
        type: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        orderby: Optional[str] = None,
        **kwargs: Any
    ) -> List[_models.VirtualMachineExtensionImage]:
        """Gets a list of virtual machine extension image versions.
        :param location: The name of a supported Azure region. Required.
        :type location: str
        :param publisher_name: Required.
        :type publisher_name: str
        :param type: Required.
        :type type: str
        :param filter: The filter to apply on the operation. Default value is None.
        :type filter: str
        :param top: Default value is None.
        :type top: int
        :param orderby: Default value is None.
        :type orderby: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineExtensionImage or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionImage]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map common HTTP failures to typed exceptions (overridable via kwargs).
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineExtensionImage]]
        request = build_list_versions_request(
            location=location,
            publisher_name=publisher_name,
            type=type,
            subscription_id=self._config.subscription_id,
            filter=filter,
            top=top,
            orderby=orderby,
            api_version=api_version,
            template_url=self.list_versions.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("[VirtualMachineExtensionImage]", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_versions.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions"} # type: ignore
| {
"content_hash": "8985918072843579407d9a9a4391f271",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 211,
"avg_line_length": 41.362116991643454,
"alnum_prop": 0.648663209643747,
"repo_name": "Azure/azure-sdk-for-python",
"id": "94a3f84e941b0c260346f6b2994fc11e4783138f",
"size": "15349",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/_virtual_machine_extension_images_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a boolean "open" flag (default True) to the Poll model so polls can
    # be closed without deleting them.
    dependencies = [
        ('polls', '0003_auto_20180809_1141'),
    ]
    operations = [
        migrations.AddField(
            model_name='poll',
            name='open',
            field=models.BooleanField(default=True),
        ),
    ]
| {
"content_hash": "77f8ae944ba7b816b21a8f4b7bdafbe7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 52,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.5860215053763441,
"repo_name": "WarwickAnimeSoc/aniMango",
"id": "fdc861c2e3cab9a82e9edb9b679f6d99a71cf737",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polls/migrations/0004_poll_open.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14554"
},
{
"name": "HTML",
"bytes": "145725"
},
{
"name": "JavaScript",
"bytes": "1241"
},
{
"name": "Python",
"bytes": "197412"
}
],
"symlink_target": ""
} |
import sqlite3
import hashlib
import os
import threading
import sqlalchemy.pool as pool
# NOTE(review): "global" at module scope is a no-op -- these three statements
# have no effect; the names are ordinary module globals regardless.
global pas
global ind2
global sql_thread_pool
def menu():
sane = 1
while sane == 1:
print "[ - ] Please enter absolute path to cred. database: "
in_path = raw_input()
if os.path.exists(in_path):
sane = 0
else:
os.system('cls' if os.name == 'nt' else 'clear')
print "[ - ] Invalid path, try again."
return(in_path)
# Main shared connection; menu() prompts interactively for the DB path.
# check_same_thread=False allows this connection to be touched from threads.
conn = sqlite3.connect(menu(), check_same_thread=False)
conn.text_factory = str
cur1 = conn.cursor()
cur2 = conn.cursor()
cur3 = conn.cursor()
pcur = conn.cursor()
# Large page cache + WAL journaling to speed up the bulk UPDATE workload.
pcur.execute("PRAGMA cache_size=999999")
pcur.execute("PRAGMA journal_mode=WAL")
# d_list: md5s already processed; in_list: buffered "index:--:pass:--:md5" rows.
d_list = []
in_list = []
def is_ascii(s):
    """Return True when every character of ``s`` fits in 7-bit ASCII."""
    for ch in s:
        if ord(ch) >= 128:
            return False
    return True
# cnt: rows updated, cnt2: candidate rows read, cnt3: exceptions swallowed.
cnt = 0
cnt2 = 0
cnt3 = 0
# First pass: buffer every usable row. Columns row[2]/row[6] are presumably
# the clear-text password and its md5 (TODO confirm schema); both must be
# ASCII and longer than 4 characters to be kept.
x = cur1.execute("SELECT * FROM main")
for row in x:
    try:
        if is_ascii(str(row[2])):
            if len(str(row[2])) > 4:
                if len(str(row[6])) > 4:
                    pas = str(row[2])
                    md5 = str(row[6])
                    ind = str(row[0])
                    # ":--:" is the record separator parsed by the main loop.
                    in_list.append(str(ind)+":--:"+str(pas)+":--:"+str(md5))
                    cnt2+=1
                    # Progress display every 1000 rows.
                    if cnt2 % 1000 == 0:
                        os.system('cls' if os.name == 'nt' else 'clear')
                        print "[ + ] "+str(cnt2)+" rainbow table md5s with clear text passes read in..."
    except UnicodeEncodeError:
        pass
def sqlite_connect():
    """Connection factory for the pool: prompts for the DB path via menu()."""
    connection = sqlite3.connect(menu(), check_same_thread=False)
    connection.text_factory = str
    return connection
# Shared connection pool for worker threads: 10 pooled + up to 20 overflow.
sql_thread_pool = pool.QueuePool(sqlite_connect, max_overflow=20, pool_size=10)
def queryMain(pas, ind2):
    """Write a recovered clear-text password onto one row of ``main``.

    Args:
        pas: clear-text password recovered from the rainbow table.
        ind2: primary-key value (pri_Index) of the row to update.
    """
    t_conn = sql_thread_pool.connect()
    try:
        cursor = t_conn.cursor()
        # Parameterized query: the previous string-concatenated SQL was
        # injectable and broke whenever the password contained a quote.
        cursor.execute(
            "UPDATE main SET clearTextP = ? WHERE pri_Index = ?",
            (str(pas), str(ind2)),
        )
    finally:
        # Return the connection to the pool even if the UPDATE raises.
        t_conn.close()
# Second pass: for each buffered record, find every row sharing the md5 and
# write the clear-text password to it on a worker thread.
for item in in_list:
    try:
        item = item.split(":--:")
        md5 = item[2]
        ind = item[0]
        pas = item[1]
        # NOTE(review): busy-wait spin to cap worker threads at 10 -- burns a
        # full CPU core while waiting.
        while threading.active_count() >= 10:
            c = 1
        # NOTE(review): O(n) list membership scan; a set would be O(1).
        if md5 not in d_list:
            y = cur2.execute("SELECT * FROM main WHERE srcMD5='"+md5+"'")
            d_list.append(md5)
            cur3.execute("begin")
            for row2 in y:
                ind2 = str(row2[0])
                query_t = threading.Thread(target=queryMain, args=(pas, ind2, ))
                query_t.start()
                cnt+=1
                # Progress display every 10 updates.
                if cnt % 10 == 0:
                    os.system('cls' if os.name == 'nt' else 'clear')
                    print "[ + ] "+str(cnt2)+" md5s with cleartext passes read in..."
                    print "[ + ] "+str(cnt)+" clear text passes found and added to records."
                    print "[ + ] "+str(len(d_list))+" md5s added to list."
                    print "[ + ] "+str(cnt3)+" exceptions caught."
    except KeyboardInterrupt:
        print "CTRL+C caught..."
        break
    # NOTE(review): bare except silently counts *any* failure, hiding bugs.
    except:
        cnt3+=1
raw_input("Press enter to commit records...")
conn.commit()
| {
"content_hash": "5c59b41c32e3a16d2d795de4eeed778e",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 96,
"avg_line_length": 25.6,
"alnum_prop": 0.5974702380952381,
"repo_name": "0xhughes/credential_db",
"id": "73aaa339bfcef2f6303340b4308779265a94531b",
"size": "2688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rain_to_clear_threading.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11847"
}
],
"symlink_target": ""
} |
class WordHist(dict):
    """Histogram mapping lower-cased words to occurrence counts."""

    def append(self, d1):
        """Merge the word counts of dict ``d1`` into this histogram.

        Keys are lower-cased before merging, so counts are case-insensitive.
        """
        for key, value in d1.items():
            key = key.lower()
            # dict.get avoids the double lookup of "key in self" + self[key].
            self[key] = self.get(key, 0) + value

    def get_tf(self, querie_word):
        """Relative frequency of words containing ``querie_word`` as a substring.

        Returns 0 for an empty histogram (avoids division by zero).
        """
        word_count = 0.0
        for key, value in self.items():
            if querie_word in key:
                word_count += value
        return word_count / self.number_of_words() if self.values() else 0

    def number_of_words(self):
        """Total number of word occurrences recorded."""
        return sum(self.values())

    def query_to_keys(self, query):
        """All keys a whitespace-separated query term is part of: e.g. abc -> {abcd, abce}."""
        return {key for key in self.keys() for word in query.split() if word in key}

    def keys_to_query(self):
        """Concatenate all (lower-cased) keys into a space-terminated query string."""
        # join over keys only (values are irrelevant here); each key carries a
        # trailing space, matching the original accumulation loop -- this also
        # avoids the quadratic "+=" string build.
        return "".join(key.lower() + " " for key in self.keys())
| {
"content_hash": "6e733602ba9305027c0360be5ee05ad4",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 89,
"avg_line_length": 26.454545454545453,
"alnum_prop": 0.5441008018327605,
"repo_name": "thomasmauerhofer/search-engine",
"id": "610abe4dc3b3c7751a7814024423e915e7be8c77",
"size": "916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/engine/utils/objects/word_hist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1839"
},
{
"name": "HTML",
"bytes": "57465"
},
{
"name": "JavaScript",
"bytes": "7199"
},
{
"name": "Python",
"bytes": "149575"
},
{
"name": "Shell",
"bytes": "252"
}
],
"symlink_target": ""
} |
import pytest
from feedrsub.feeds.search_manager import FeedSearchManager
from feedrsub.models.feed import STATUS, Feed
from feedrsub.models.feedinfo_schema import FeedInfo
from tests.factories import FeedFactory
@pytest.fixture(scope="function")
def feedsearchmanager():
    # Fresh manager per test so accumulated urls/excluded state cannot leak.
    return FeedSearchManager()
@pytest.fixture(scope="function")
def feed_pending(session):
    # A feed still awaiting subscription.
    return FeedFactory(topic="http://test.com/pendingfeed", status=STATUS.PENDING_SUB)
@pytest.fixture(scope="function")
def feed_excluded(session):
    # A feed explicitly excluded from subscription.
    return FeedFactory(topic="http://test.com/excludedfeed", status=STATUS.EXCLUDED)
@pytest.fixture(scope="function")
def feed_subscribed(session):
    # A feed that is already subscribed.
    return FeedFactory(topic="http://test.com/subscribedfeed", status=STATUS.SUBSCRIBED)
@pytest.fixture(scope="function")
def feed_unsubscribed(session):
    # A feed that has been unsubscribed.
    return FeedFactory(
        topic="http://test.com/unsubscribedfeed", status=STATUS.UNSUBSCRIBED
    )
@pytest.fixture(scope="function")
def feeds(session, db, feed_pending, feed_excluded, feed_subscribed, feed_unsubscribed):
    # One feed in each status, persisted together to the test database.
    feeds = [feed_pending, feed_excluded, feed_subscribed, feed_unsubscribed]
    db.session.add_all(feeds)
    return feeds
def test_get_naked_domain(feedsearchmanager):
    # "www." prefix is stripped; bare domains pass through unchanged.
    result1 = feedsearchmanager.get_naked_domain("www.test.com")
    assert result1 == "test.com"
    result2 = feedsearchmanager.get_naked_domain("test.com")
    assert result2 == "test.com"
def test_is_comment_feed(feedsearchmanager):
    # URLs containing "comment" are treated as comment feeds.
    assert feedsearchmanager.is_comment_feed("http://test.com/comments")
    assert feedsearchmanager.is_comment_feed("http://test.com/comments/feed")
    assert feedsearchmanager.is_comment_feed("http://test.com/feed") is False
    assert feedsearchmanager.is_comment_feed("http://test.com/comment")
def test_is_excluded(session, feedsearchmanager):
    # Exclusion matches by domain regardless of "www." prefix or path.
    feedsearchmanager.excluded = ["auctorial.com", "test.com"]
    assert feedsearchmanager.is_excluded("http://www.test.com")
    assert feedsearchmanager.is_excluded("http://www.test2.com") is False
    assert feedsearchmanager.is_excluded("https://auctorial.com/test")
def test_is_valid_feed_info_comment_feed(session, feedsearchmanager):
    # Comment feeds are never valid candidates.
    info = FeedInfo(url="http://test.com/comments")
    result = feedsearchmanager.is_valid_feed_info(info)
    assert result is False
def test_is_valid_feed_info_already_exists(session, feedsearchmanager):
    # A url already recorded by the manager is rejected as a duplicate.
    url = "http://test.com"
    feedsearchmanager.urls = [url]
    info = FeedInfo(url=url)
    assert feedsearchmanager.is_valid_feed_info(info) is False
def test_is_valid_feed_info(session, feedsearchmanager):
    # A new, non-comment feed url is accepted even if its site is known.
    feedsearchmanager.urls = ["http://test.com"]
    info = FeedInfo(url="http://test.com/feed")
    assert feedsearchmanager.is_valid_feed_info(info)
def test_process_feed_info_pending(session, feedsearchmanager, feed_pending):
    # Processing a PENDING_SUB feed should record it, mark it subscribed,
    # and synchronise the stored feed's title with the crawled FeedInfo.
    url = feed_pending.topic
    info = FeedInfo(url=url, title="Test")
    info.subscribed = False
    feed = Feed.query.filter_by(topic=url).first()
    assert feed.title != info.title
    feedsearchmanager.process_feed_info(info, feed)
    assert len(feedsearchmanager.feed_info_list) == 1
    assert feedsearchmanager.feed_info_list == [info]
    assert len(feedsearchmanager.urls) == 1
    assert feedsearchmanager.urls == [info.url]
    assert len(feedsearchmanager.excluded) == 0
    assert info.subscribed is True
    feed = Feed.query.filter_by(topic=url).first()
    assert feed.title == info.title
def test_process_feed_info_subscribed(session, feedsearchmanager, feed_subscribed):
    # A feed already SUBSCRIBED behaves like a pending one: it is recorded,
    # flagged subscribed, and its stored title is refreshed.
    url = feed_subscribed.topic
    info = FeedInfo(url=url, title="Test")
    info.subscribed = False
    feed = Feed.query.filter_by(topic=url).first()
    assert feed.title != info.title
    feedsearchmanager.process_feed_info(info, feed)
    assert len(feedsearchmanager.feed_info_list) == 1
    assert feedsearchmanager.feed_info_list == [info]
    assert len(feedsearchmanager.urls) == 1
    assert feedsearchmanager.urls == [info.url]
    assert len(feedsearchmanager.excluded) == 0
    assert info.subscribed is True
    feed = Feed.query.filter_by(topic=url).first()
    assert feed.title == info.title
def test_process_feed_info_excluded(session, feedsearchmanager, feed_excluded):
    """An EXCLUDED feed is tracked on the excluded list, never queued for
    subscription, but still gets its stored title refreshed from the info."""
    url = feed_excluded.topic
    info = FeedInfo(url=url, title="Test")
    info.subscribed = False
    assert feed_excluded.title != info.title
    feedsearchmanager.process_feed_info(info, feed_excluded)
    # Excluded feeds go on their own list and are not added to urls.
    assert len(feedsearchmanager.feed_info_list) == 0
    assert len(feedsearchmanager.urls) == 0
    assert len(feedsearchmanager.excluded) == 1
    assert feedsearchmanager.excluded == [info.url]
    assert info.subscribed is False
    # The title update still happens even for excluded feeds.
    feed = Feed.query.filter_by(topic=url).first()
    assert feed.title == info.title
def test_get_existing_feeds(session, feedsearchmanager):
    # Only feeds whose topic matches one of the given domains are returned;
    # the third feed (different domain) is excluded from the result.
    feed1 = Feed(topic="http://testing.com")
    feed2 = Feed(topic="https://example.net")
    feed3 = Feed(topic="https://example.com/examplefeed")
    session.add(feed1)
    session.add(feed2)
    session.add(feed3)
    session.commit()
    urls = ["testing.com", "example.net"]
    found = feedsearchmanager.get_existing_feeds(urls)
    assert 2 == len(found)
| {
"content_hash": "115f5b2721cf4bdb58258aa5cd33f5c2",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 88,
"avg_line_length": 31.36046511627907,
"alnum_prop": 0.7189469781238413,
"repo_name": "DBeath/flask-feedrsub",
"id": "828bbecddc37f18ef8884827c5651c5d98c01ea1",
"size": "5394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/feeds/search_manager_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4337"
},
{
"name": "Dockerfile",
"bytes": "1105"
},
{
"name": "HTML",
"bytes": "60608"
},
{
"name": "JavaScript",
"bytes": "24058"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "815501"
},
{
"name": "Shell",
"bytes": "6364"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from cctbx import sgtbx
from scitbx import matrix
from xia2.Experts.LatticeExpert import lattice_to_spacegroup
def _multiply_symmetry_matrix(a, b):
    """Compute a * b for flattened 3x3 matrices, i.e. h' = (a * b) * h
    applies b before a; returns the flattened element tuple."""
    return (matrix.sqr(a) * matrix.sqr(b)).elems
def r_to_rt(r):
    """Convert a flattened 3x3 R matrix to a flattened 3x4 RT matrix, T=0.

    Args:
        r: indexable of 9 elements, row-major.

    Returns:
        List of 12 elements: each row of ``r`` followed by a 0 translation term.
    """
    # Single comprehension replaces the nested append loop; only indexing is
    # required of ``r`` (it may be a tuple of matrix elements, not a list).
    return [x for i in range(3) for x in (r[3 * i], r[3 * i + 1], r[3 * i + 2], 0)]
def rt_to_r(rt):
    """Convert a flattened 3x4 RT matrix to a flattened 3x3 R matrix,
    discarding the translation column.

    Args:
        rt: indexable of 12 elements, row-major, translation last per row.

    Returns:
        List of the 9 rotation elements.
    """
    # Comprehension replaces the nested append loop (same index arithmetic).
    return [rt[4 * i + j] for i in range(3) for j in range(3)]
def compose_symops(a, b):
    """Compose operation c, which is applying b then a.

    Both operations are given in hkl (change-of-basis) notation and the
    composite is returned in the same notation.
    """
    return (sgtbx.change_of_basis_op(b) * sgtbx.change_of_basis_op(a)).as_hkl()
def symop_to_mat(symop):
    """Convert an hkl-notation symmetry operation to its flattened 3x3
    rotation matrix (the first 9 elements; any translation is dropped)."""
    return matrix.sqr(sgtbx.change_of_basis_op(symop).c().as_double_array()[:9]).elems
def mat_to_symop(mat):
    """Convert a flattened 3x3 matrix (zero translation) to an hkl-notation
    symmetry operation."""
    # NOTE(review): r_den=12 / t_den=144 presumably chosen to represent the
    # usual crystallographic rational fractions exactly -- confirm.
    return sgtbx.change_of_basis_op(
        sgtbx.rt_mx(matrix.sqr(mat), (0, 0, 0), r_den=12, t_den=144)
    ).as_hkl()
def lattice_to_spacegroup_number(lattice):
    """Return the spacegroup number corresponding to the lowest symmetry
    possible for a given Bravais lattice."""
    # Guard clause instead of wrapping the lookup in a conditional body.
    if lattice in lattice_to_spacegroup:
        return lattice_to_spacegroup[lattice]
    raise RuntimeError("lattice %s unknown" % lattice)
| {
"content_hash": "311c1b635f8aa74cd47c36c668b9cc47",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 86,
"avg_line_length": 23.532258064516128,
"alnum_prop": 0.6271418779986292,
"repo_name": "xia2/xia2",
"id": "01d951300eba29610090fbef36fa2bb9bcf22d4c",
"size": "1459",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/xia2/Experts/SymmetryExpert.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3958"
},
{
"name": "HTML",
"bytes": "35830"
},
{
"name": "Python",
"bytes": "1857098"
}
],
"symlink_target": ""
} |
import cookielib
import copy
import httplib
import os
import json
import pickle
import Queue
import random
import re
import socket
import sys
import time
import urllib2
import urlparse
from threading import Thread
from task import *
import config
# Pull frequently-used settings out of the config module into constants.
GENERATE_RESULTS = config.GENERATE_RESULTS
COOKIES_ENABLED = config.COOKIES_ENABLED
HTTP_DEBUG = config.HTTP_DEBUG
SHUFFLE_TESTCASES = config.SHUFFLE_TESTCASES
WAITFOR_AGENT_FINISH = config.WAITFOR_AGENT_FINISH
SOCKET_TIMEOUT = config.SOCKET_TIMEOUT
class TaskAgent(Thread):
    """Worker thread that repeatedly sends one HTTP task and records stats.

    Results are published into the shared runtime_stats dict under this
    agent's id; the shared ``signal`` dict gates execution ("setted" key).
    """
    def __init__(self, id, runtime_stats, task, signal):
        Thread.__init__(self)
        socket.setdefaulttimeout(SOCKET_TIMEOUT)
        self.id = id
        self.runtime_stats = runtime_stats
        self.running = True
        self.count = 0
        self.error_count = 0
        # Timer used for latency measurements.
        self.default_timer = time.time
        self.trace_logging = False
        self.task = task
        self.signal = signal
    def run(self):
        agent_start_time = time.strftime('%H:%M:%S', time.localtime())
        total_latency = 0
        total_connect_latency = 0
        total_bytes = 0
        while self.running:
            # Only send while the controller has raised the start signal.
            if self.signal['setted']:
                # Fresh cookie jar per request cycle.
                self.cookie_jar = cookielib.CookieJar()
                resp, content, req_start_time, req_end_time, connect_end_time = self.send(self.task)
                self.count += 1
                if resp.code != 200:
                    self.error_count += 1
                    self.task.error = resp.code
                    self.task.result = content
                else:
                    # Task-specific verification of the response body.
                    (r, emsg) = self.task.verify(content)
                    if not r:
                        # -1000 marks a verification failure (vs an HTTP error).
                        self.error_count += 1
                        self.task.error = -1000
                        self.task.result = '发生错误:\n【%s】\n实际返回:\n%s' % (emsg, content)
                    else:
                        self.task.result = content
                latency = (req_end_time - req_start_time)
                connect_latency = (connect_end_time - req_start_time)
                resp_bytes = len(content)
                total_bytes += resp_bytes
                total_latency += latency
                total_connect_latency += connect_latency
                # Publish a fresh snapshot of this agent's cumulative stats.
                self.runtime_stats[self.id] = StatCollection(
                    resp.code, resp.msg, latency, self.count, self.error_count,
                    total_latency, total_connect_latency, total_bytes
                )
                self.runtime_stats[self.id].agent_start_time = agent_start_time
                # Stop on first error, or after one pass for non-looping tasks.
                if self.error_count > 0:
                    break
                if not self.task.loop:
                    break
            else:
                time.sleep(0.01)
    def stop(self):
        # Cooperative shutdown: run() exits at the next loop check.
        self.running = False
    def send(self, req):
        # Build an opener according to the debug/cookie configuration.
        if HTTP_DEBUG:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar), urllib2.HTTPHandler(debuglevel=1))
        elif COOKIES_ENABLED:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar))
        else:
            opener = urllib2.build_opener()
        if req.method.upper() == 'POST':
            request = urllib2.Request(req.url, req.body, req.headers)
        else:
            request = urllib2.Request(req.url, None, req.headers)
        req_start_time = self.default_timer()
        try:
            resp = opener.open(request)
            connect_end_time = self.default_timer()
            content = resp.read()
            req_end_time = self.default_timer()
        # NOTE(review): the branches below synthesize an ErrorResponse, but
        # only the final "except Exception" branch sets req_end_time -- the
        # earlier ones appear to leave it unset (NameError risk at return);
        # confirm against the upstream source.
        except httplib.HTTPException, e:
            connect_end_time = self.default_timer()
            resp = ErrorResponse()
            resp.code = 0
            resp.msg = str(e)
            resp.headers = {}
            content = ''
        except urllib2.HTTPError, e:
            # HTTP error responses still carry a status, headers and a body.
            connect_end_time = self.default_timer()
            resp = ErrorResponse()
            resp.code = e.code
            resp.msg = httplib.responses[e.code]
            resp.headers = dict(e.info())
            content = e.read()
        except urllib2.URLError, e:
            connect_end_time = self.default_timer()
            resp = ErrorResponse()
            resp.code = 0
            resp.msg = str(e.reason)
            resp.headers = {}
            content = ''
        except IOError, e:
            connect_end_time = self.default_timer()
            resp = ErrorResponse()
            resp.code = 0
            resp.msg = str(e)
            resp.headers = {}
            resp.content = 'IOError'
            content = ''
        except Exception, e:
            connect_end_time = self.default_timer()
            resp = ErrorResponse()
            resp.code = 0
            resp.msg = str(e)
            resp.headers = {}
            resp.content = 'Exception'
            content = ''
            req_end_time = self.default_timer()
        if self.trace_logging:
            self.log_http_msgs(req, request, resp, content)
        return (resp, content, req_start_time, req_end_time, connect_end_time)
class LoadManager(Thread):
    """Orchestrates a load test: spawns one TaskAgent per task, starts the
    ResultWriter, then releases all agents simultaneously via a shared
    signal dict."""
    def __init__(self, tasks, runtime_stats, error_queue, output_dir = None, test_name = None):
        Thread.__init__(self)
        socket.setdefaulttimeout(SOCKET_TIMEOUT)
        self.running = True
        self.tasks = tasks
        self.num_agents = len(tasks)
        self.runtime_stats = runtime_stats
        self.error_queue = error_queue
        self.test_name = test_name
        # Timestamped results directory; precedence: both args > output_dir
        # alone > test_name alone > 'results/' default.
        if output_dir and test_name:
            self.output_dir = time.strftime(output_dir + '/' + test_name + '_' + 'results_%Y.%m.%d_%H.%M.%S', time.localtime())
        elif output_dir:
            self.output_dir = time.strftime(output_dir + '/' + 'results_%Y.%m.%d_%H.%M.%S', time.localtime())
        elif test_name:
            self.output_dir = time.strftime('results/' + test_name + '_' + 'results_%Y.%m.%d_%H.%M.%S', time.localtime())
        else:
            self.output_dir = time.strftime('results/results_%Y.%m.%d_%H.%M.%S', time.localtime())
        # Pre-seed stats so readers never hit a missing agent id.
        for i in range(self.num_agents):
            self.runtime_stats[i] = StatCollection(0, '', 0, 0, 0, 0, 0, 0)
        self.results_queue = Queue.Queue()
        self.agent_refs = []
        self.msg_queue = []
    def run(self):
        self.running = True
        self.agents_started = False
        try:
            os.makedirs(self.output_dir, 0755)
        except OSError:
            # Directory already exists (or failed); retry once inside a
            # nested timestamped subdirectory before giving up.
            self.output_dir = self.output_dir + time.strftime('/results_%Y.%m.%d_%H.%M.%S', time.localtime())
            try:
                os.makedirs(self.output_dir, 0755)
            except OSError:
                sys.stderr.write('ERROR: Can not create output directory\n')
                sys.exit(1)
        self.results_writer = ResultWriter(self.results_queue, self.output_dir)
        self.results_writer.setDaemon(True)
        self.results_writer.start()
        # Shared start flag; agents spin on it so they all begin together.
        signal = {
            'setted': False
        }
        print('-------------------------------------------------')
        print('开始启动并发测试')
        for i in range(self.num_agents):
            if self.running:
                agent = TaskAgent(i, self.runtime_stats, self.tasks[i], signal)
                agent.start()
                self.agent_refs.append(agent)
                agent_started_line = u'激活虚拟用户 %d 个' % (i + 1)
                if sys.platform.startswith('win'):
                    # Windows console: rewind with backspaces and overwrite
                    # the counter in place.
                    sys.stdout.write(chr(0x08) * len(agent_started_line))
                    sys.stdout.write(agent_started_line)
                else:
                    esc = chr(27) # escape key
                    # ANSI: move to column 0, up one line, rewrite counter.
                    sys.stdout.write(esc + '[G' )
                    sys.stdout.write(esc + '[A' )
                    sys.stdout.write(agent_started_line + '\n')
        # Release every agent at once.
        signal['setted'] = True
        if sys.platform.startswith('win'):
            sys.stdout.write('\n')
        print '开始测试 ...\n'
        self.agents_started = True
    def stop(self, wait = True):
        # Ask every agent to stop; if wait, poll until all threads exit.
        self.running = False
        for agent in self.agent_refs:
            agent.stop()
        if wait:
            keep_running = True
            while keep_running:
                keep_running = False
                for agent in self.agent_refs:
                    if agent.isAlive():
                        keep_running = True
                        time.sleep(0.1)
        self.results_writer.stop()
class ErrorResponse():
    """Minimal stand-in for a urllib2 response object when a request fails.

    Callers overwrite ``code``/``msg``/``headers`` with real values where
    they are known (e.g. from an HTTPError).
    """
    def __init__(self):
        # Defaults describe a plain connection failure.
        self.code, self.msg, self.headers = 0, 'Connection error', {}
class StatCollection():
    """Snapshot of one agent's cumulative request statistics.

    Averages are derived at construction time; ``agent_start_time`` is
    filled in by the owning agent after the object is built.
    """
    def __init__(self, status, reason, latency, count, error_count, total_latency, total_connect_latency, total_bytes):
        self.status = status
        self.reason = reason
        self.latency = latency
        self.count = count
        self.error_count = error_count
        self.total_latency = total_latency
        self.total_connect_latency = total_connect_latency
        self.total_bytes = total_bytes
        # Set externally by TaskAgent once the snapshot is published.
        self.agent_start_time = None
        if count > 0:
            self.avg_latency = total_latency / count
            self.avg_connect_latency = total_connect_latency / count
        else:
            # No requests yet -- avoid dividing by zero.
            self.avg_latency = 0
            self.avg_connect_latency = 0
class ResultWriter(Thread):
    """Daemon thread that drains the results queue into agent_stats.csv."""
    def __init__(self, results_queue, output_dir):
        Thread.__init__(self)
        self.running = True
        self.results_queue = results_queue
        self.output_dir = output_dir
    def run(self):
        # Create/truncate the csv up front so readers see the file early.
        fh = open('%s/agent_stats.csv' % self.output_dir, 'w')
        fh.close()
        while self.running:
            try:
                q_tuple = self.results_queue.get(False)
                # Re-opened per record: slow, but every row is flushed and
                # no handle is left dangling if the thread dies mid-test.
                f = open('%s/agent_stats.csv' % self.output_dir, 'a')
                f.write('%s,%s,%s,%s,%s,%d,%s,%d,%f,%f,%s\n' % q_tuple) # log as csv
                f.flush()
                f.close()
            except Queue.Empty:
                # Nothing queued -- poll at 10 Hz.
                time.sleep(.10)
    def stop(self):
        self.running = False
| {
"content_hash": "2da571fcb2baece04f1ca69edccb6a57",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 127,
"avg_line_length": 33.505050505050505,
"alnum_prop": 0.5304994472917295,
"repo_name": "hishopdc/pybench",
"id": "1fc583cb357e4bc81dd36e340f5548cc54d61b12",
"size": "10058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56044"
}
],
"symlink_target": ""
} |
from unittest import mock
import twisted.web.client
from twisted.internet import defer
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.util import Clock
from tests.test_utils import FakeResponse, event_injection
from tests.unittest import FederatingHomeserverTestCase
class FederationClientTest(FederatingHomeserverTestCase):
    """Tests for the federation client's outbound requests and response
    handling, with the underlying Twisted HTTP ``Agent`` mocked out."""
    servlets = [
        admin.register_servlets,
        room.register_servlets,
        login.register_servlets,
    ]
    def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer):
        """Install a mock HTTP agent and advance the clock past PDU retry backoff."""
        super().prepare(reactor, clock, homeserver)
        # mock out the Agent used by the federation client, which is easier than
        # catching the HTTPS connection and do the TLS stuff.
        self._mock_agent = mock.create_autospec(twisted.web.client.Agent, spec_set=True)
        homeserver.get_federation_http_client().agent = self._mock_agent
        # Move clock up to somewhat realistic time so the PDU destination retry
        # works (`now` needs to be larger than `0 + PDU_RETRY_TIME_MS`).
        self.reactor.advance(1000000000)
        self.creator = f"@creator:{self.OTHER_SERVER_NAME}"
        self.test_room_id = "!room_id"
    def test_get_room_state(self):
        """`get_room_state` should parse the /state response into state and auth lists."""
        # mock up some events to use in the response.
        # In real life, these would have things in `prev_events` and `auth_events`, but that's
        # a bit annoying to mock up, and the code under test doesn't care, so we don't bother.
        create_event_dict = self.add_hashes_and_signatures_from_other_server(
            {
                "room_id": self.test_room_id,
                "type": "m.room.create",
                "state_key": "",
                "sender": self.creator,
                "content": {"creator": self.creator},
                "prev_events": [],
                "auth_events": [],
                "origin_server_ts": 500,
            }
        )
        member_event_dict = self.add_hashes_and_signatures_from_other_server(
            {
                "room_id": self.test_room_id,
                "type": "m.room.member",
                "sender": self.creator,
                "state_key": self.creator,
                "content": {"membership": "join"},
                "prev_events": [],
                "auth_events": [],
                "origin_server_ts": 600,
            }
        )
        pl_event_dict = self.add_hashes_and_signatures_from_other_server(
            {
                "room_id": self.test_room_id,
                "type": "m.room.power_levels",
                "sender": self.creator,
                "state_key": "",
                "content": {},
                "prev_events": [],
                "auth_events": [],
                "origin_server_ts": 700,
            }
        )
        # mock up the response, and have the agent return it
        self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed(
            FakeResponse.json(
                payload={
                    "pdus": [
                        create_event_dict,
                        member_event_dict,
                        pl_event_dict,
                    ],
                    "auth_chain": [
                        create_event_dict,
                        member_event_dict,
                    ],
                }
            )
        )
        # now fire off the request
        state_resp, auth_resp = self.get_success(
            self.hs.get_federation_client().get_room_state(
                "yet.another.server",
                self.test_room_id,
                "event_id",
                RoomVersions.V9,
            )
        )
        # check the right call got made to the agent
        self._mock_agent.request.assert_called_once_with(
            b"GET",
            b"matrix://yet.another.server/_matrix/federation/v1/state/%21room_id?event_id=event_id",
            headers=mock.ANY,
            bodyProducer=None,
        )
        # ... and that the response is correct.
        # the auth_resp should be empty because all the events are also in state
        self.assertEqual(auth_resp, [])
        # all of the events should be returned in state_resp, though not necessarily
        # in the same order. We just check the type on the assumption that if the type
        # is right, so is the rest of the event.
        self.assertCountEqual(
            [e.type for e in state_resp],
            ["m.room.create", "m.room.member", "m.room.power_levels"],
        )
    def test_get_pdu_returns_nothing_when_event_does_not_exist(self):
        """No event should be returned when the event does not exist"""
        pulled_pdu_info = self.get_success(
            self.hs.get_federation_client().get_pdu(
                ["yet.another.server"],
                "event_should_not_exist",
                RoomVersions.V9,
            )
        )
        self.assertEqual(pulled_pdu_info, None)
    def test_get_pdu(self):
        """Test to make sure an event is returned by `get_pdu()`"""
        self._get_pdu_once()
    def test_get_pdu_event_from_cache_is_pristine(self):
        """Test that modifications made to events returned by `get_pdu()`
        do not propagate back to the internal cache (events returned should
        be a copy).
        """
        # Get the PDU in the cache
        remote_pdu = self._get_pdu_once()
        # Modify the event reference.
        # This change should not make it back to the `_get_pdu_cache`.
        remote_pdu.internal_metadata.outlier = True
        # Get the event again. This time it should read it from cache.
        pulled_pdu_info2 = self.get_success(
            self.hs.get_federation_client().get_pdu(
                ["yet.another.server"],
                remote_pdu.event_id,
                RoomVersions.V9,
            )
        )
        self.assertIsNotNone(pulled_pdu_info2)
        remote_pdu2 = pulled_pdu_info2.pdu
        # Sanity check that we are working against the same event
        self.assertEqual(remote_pdu.event_id, remote_pdu2.event_id)
        # Make sure the event does not include modification from earlier
        self.assertIsNotNone(remote_pdu2)
        self.assertEqual(remote_pdu2.internal_metadata.outlier, False)
    def _get_pdu_once(self) -> EventBase:
        """Retrieve an event via `get_pdu()` and assert that an event was returned.
        Also used to prime the cache for subsequent test logic.
        """
        message_event_dict = self.add_hashes_and_signatures_from_other_server(
            {
                "room_id": self.test_room_id,
                "type": "m.room.message",
                "sender": self.creator,
                "state_key": "",
                "content": {},
                "prev_events": [],
                "auth_events": [],
                "origin_server_ts": 700,
                "depth": 10,
            }
        )
        # mock up the response, and have the agent return it
        self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed(
            FakeResponse.json(
                payload={
                    "origin": "yet.another.server",
                    "origin_server_ts": 900,
                    "pdus": [
                        message_event_dict,
                    ],
                }
            )
        )
        pulled_pdu_info = self.get_success(
            self.hs.get_federation_client().get_pdu(
                ["yet.another.server"],
                "event_id",
                RoomVersions.V9,
            )
        )
        self.assertIsNotNone(pulled_pdu_info)
        remote_pdu = pulled_pdu_info.pdu
        # check the right call got made to the agent
        self._mock_agent.request.assert_called_once_with(
            b"GET",
            b"matrix://yet.another.server/_matrix/federation/v1/event/event_id",
            headers=mock.ANY,
            bodyProducer=None,
        )
        self.assertIsNotNone(remote_pdu)
        self.assertEqual(remote_pdu.internal_metadata.outlier, False)
        return remote_pdu
    def test_backfill_invalid_signature_records_failed_pull_attempts(
        self,
    ) -> None:
        """
        Test to make sure that events from /backfill with invalid signatures get
        recorded as failed pull attempts.
        """
        OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
        main_store = self.hs.get_datastores().main
        # Create the room
        user_id = self.register_user("kermit", "test")
        tok = self.login("kermit", "test")
        room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
        # We purposely don't run `add_hashes_and_signatures_from_other_server`
        # over this because we want the signature check to fail.
        pulled_event, _ = self.get_success(
            event_injection.create_event(
                self.hs,
                room_id=room_id,
                sender=OTHER_USER,
                type="test_event_type",
                content={"body": "garply"},
            )
        )
        # We expect an outbound request to /backfill, so stub that out
        self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed(
            FakeResponse.json(
                payload={
                    "origin": "yet.another.server",
                    "origin_server_ts": 900,
                    # Mimic the other server returning our new `pulled_event`
                    "pdus": [pulled_event.get_pdu_json()],
                }
            )
        )
        self.get_success(
            self.hs.get_federation_client().backfill(
                # We use "yet.another.server" instead of
                # `self.OTHER_SERVER_NAME` because we want to see the behavior
                # from `_check_sigs_and_hash_and_fetch_one` where it tries to
                # fetch the PDU again from the origin server if the signature
                # fails. Just want to make sure that the failure is counted from
                # both code paths.
                dest="yet.another.server",
                room_id=room_id,
                limit=1,
                extremities=[pulled_event.event_id],
            ),
        )
        # Make sure our failed pull attempt was recorded
        backfill_num_attempts = self.get_success(
            main_store.db_pool.simple_select_one_onecol(
                table="event_failed_pull_attempts",
                keyvalues={"event_id": pulled_event.event_id},
                retcol="num_attempts",
            )
        )
        # This is 2 because it failed once from `self.OTHER_SERVER_NAME` and the
        # other from "yet.another.server"
        self.assertEqual(backfill_num_attempts, 2)
| {
"content_hash": "d9f22c6a01dd06300c30eefefca7420e",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 100,
"avg_line_length": 37.43344709897611,
"alnum_prop": 0.5447665937272064,
"repo_name": "matrix-org/synapse",
"id": "e67f4058260f561269839693d4422225812fa2a2",
"size": "11560",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/federation/test_federation_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7229"
},
{
"name": "Dockerfile",
"bytes": "9316"
},
{
"name": "Gherkin",
"bytes": "441"
},
{
"name": "HTML",
"bytes": "66000"
},
{
"name": "JavaScript",
"bytes": "15635"
},
{
"name": "Jinja",
"bytes": "7687"
},
{
"name": "Lua",
"bytes": "241"
},
{
"name": "Perl",
"bytes": "28191"
},
{
"name": "Python",
"bytes": "10632037"
},
{
"name": "Rust",
"bytes": "57034"
},
{
"name": "Shell",
"bytes": "53124"
}
],
"symlink_target": ""
} |
"""
Post Markup
Author: Will McGugan (http://www.willmcgugan.com)
pyClanSphere-related addons by pyClanSphere Team
"""
__version__ = "1.1.5dev-pyClanSphere"
import re
from urllib import quote, unquote, quote_plus, urlencode
from urlparse import urlparse, urlunparse
pygments_available = True
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, ClassNotFound
from pygments.formatters import HtmlFormatter
except ImportError:
# Make Pygments optional
pygments_available = False
def annotate_link(domain):
    """Hook called by the url tag to append a note after a rendered link.

    Override to disable or change behaviour.

    domain -- Domain parsed from url
    """
    return u" [%s]" % _escape(domain)
# Recognises bare http/https urls inside text.
_re_url = re.compile(r"((https?):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+-=\\\.&]*)", re.MULTILINE|re.UNICODE)
# Matches html tags and character entities for stripping.
_re_html = re.compile('<.*?>|\&.*?\;', re.UNICODE|re.DOTALL)

def textilize(s):
    """Return *s* as plain text: html tags and entities removed."""
    # Paragraph opens become spaces so adjacent words don't run together.
    return _re_html.sub("", s.replace("<p>", " "))
# ["]...[/"] excerpt delimiters, any bbcode tag, and newline runs.
_re_excerpt = re.compile(r'\[".*?\]+?.*?\[/".*?\]+?', re.DOTALL|re.UNICODE)
_re_remove_markup = re.compile(r'\[.*?\]', re.DOTALL|re.UNICODE)
_re_break_groups = re.compile(r'[\r\n]+', re.DOTALL|re.UNICODE)

def get_excerpt(post):
    """Returns an excerpt between ["] and [/"]

    post -- BBCode string"""
    found = _re_excerpt.search(post)
    if found is None:
        return ""
    quoted = found.group(0).replace(u'\n', u"<br/>")
    # Drop every bbcode tag (including the excerpt markers themselves).
    return _re_remove_markup.sub("", quoted)
def strip_bbcode(bbcode):
    """Strips bbcode tags from a string.

    bbcode -- A string to remove tags from
    """
    # Keep only the text tokens produced by the tokenizer.
    text_parts = [token[1]
                  for token in PostMarkup.tokenize(bbcode)
                  if token[0] == PostMarkup.TOKEN_TEXT]
    return u"".join(text_parts)
def create(include=None, exclude=None, use_pygments=True, **kwargs):
    """Create a postmarkup object that converts bbcode to XML snippets. Note
    that creating postmarkup objects is _not_ threadsafe, but rendering the
    html _is_ threadsafe. So typically you will need just one postmarkup instance
    to render the bbcode across threads.

    include -- List or similar iterable containing the names of the tags to use
               If omitted, all tags will be used
    exclude -- List or similar iterable containing the names of the tags to exclude.
               If omitted, no tags will be excluded
    use_pygments -- If True, Pygments (http://pygments.org/) will be used for the code tag,
                    otherwise it will use <pre>code</pre>
    kwargs -- Remaining keyword arguments are passed to tag constructors.
    """
    postmarkup = PostMarkup()
    postmarkup_add_tag = postmarkup.tag_factory.add_tag
    # Local wrapper that applies the include/exclude filters.
    def add_tag(tag_class, name, *args, **kwargs):
        if include is None or name in include:
            if exclude is not None and name in exclude:
                return
            postmarkup_add_tag(tag_class, name, *args, **kwargs)
    add_tag(SimpleTag, 'b', 'strong')
    add_tag(SimpleTag, 'i', 'em')
    add_tag(SimpleTag, 'u', 'u')
    add_tag(SimpleTag, 's', 'strike')
    add_tag(LinkTag, 'link', **kwargs)
    add_tag(LinkTag, 'url', **kwargs)
    add_tag(QuoteTag, 'quote')
    add_tag(SearchTag, u'wiki',
            u"http://en.wikipedia.org/wiki/Special:Search?search=%s", u'wikipedia.com', **kwargs)
    add_tag(SearchTag, u'google',
            u"http://www.google.com/search?hl=en&q=%s&btnG=Google+Search", u'google.com', **kwargs)
    add_tag(SearchTag, u'dictionary',
            u"http://dictionary.reference.com/browse/%s", u'dictionary.com', **kwargs)
    add_tag(SearchTag, u'dict',
            u"http://dictionary.reference.com/browse/%s", u'dictionary.com', **kwargs)
    add_tag(ImgTag, u'img')
    add_tag(ListTag, u'list')
    add_tag(ListItemTag, u'*')
    add_tag(SizeTag, u"size")
    add_tag(ColorTag, u"color")
    add_tag(FontTag, u"font")
    add_tag(CenterTag, u"center")
    add_tag(LeftTag, u"left")
    add_tag(RightTag, u"right")
    add_tag(NoparseTag, u"noparse")
    # pyClanSphere bulletin-board cross-link tags.
    add_tag(ThreadLinkTag, u"thread")
    add_tag(PostLinkTag, u"post")
    if use_pygments:
        assert pygments_available, "Install Pygments (http://pygments.org/) or call create with use_pygments=False"
        add_tag(PygmentsCodeTag, u'code', **kwargs)
    else:
        add_tag(CodeTag, u'code', **kwargs)
    add_tag(ParagraphTag, u"p")
    return postmarkup
class TagBase(object):
    def __init__(self, name, enclosed=False, auto_close=False, inline=False, strip_first_newline=False, **kwargs):
        """Base class for all tags.

        name -- The name of the bbcode tag
        enclosed -- True if the contents of the tag should not be bbcode processed.
        auto_close -- True if the tag is standalone and does not require a close tag.
        inline -- True if the tag generates an inline html tag.
        strip_first_newline -- True if a leading newline in the contents
            should be dropped (presumably consumed by the parser; the
            stripping itself is not visible in this class).
        """
        self.name = name
        self.enclosed = enclosed
        self.auto_close = auto_close
        self.inline = inline
        self.strip_first_newline = strip_first_newline
        # Character offsets of the tag in the source markup and indices of
        # its nodes in the parser's node list; populated by open()/close().
        self.open_pos = None
        self.close_pos = None
        self.open_node_index = None
        self.close_node_index = None
    def open(self, parser, params, open_pos, node_index):
        """ Called when the open tag is initially encountered. """
        self.params = params
        self.open_pos = open_pos
        self.open_node_index = node_index
    def close(self, parser, close_pos, node_index):
        """ Called when the close tag is initially encountered. """
        self.close_pos = close_pos
        self.close_node_index = node_index
    def render_open(self, parser, node_index):
        """ Called to render the open tag. """
        pass
    def render_close(self, parser, node_index):
        """ Called to render the close tag. """
        pass
    def get_contents(self, parser):
        """Returns the string between the open and close tag."""
        return parser.markup[self.open_pos:self.close_pos]
    def get_contents_text(self, parser):
        """Returns the string between the the open and close tag, minus bbcode tags."""
        return u"".join( parser.get_text_nodes(self.open_node_index, self.close_node_index) )
    def skip_contents(self, parser):
        """Skips the contents of a tag while rendering."""
        parser.skip_to_node(self.close_node_index)
    def __str__(self):
        return '[%s]'%self.name
class SimpleTag(TagBase):
    """A tag rendered by substituting a single html element, e.g. [b] -> <b>.

    Nested occurrences of the same tag emit nothing; only the outermost
    level produces markup.
    """
    def __init__(self, name, html_name, **kwargs):
        """html_name -- the html tag to substitute."""
        TagBase.__init__(self, name, inline=True)
        self.html_name = html_name
    def render_open(self, parser, node_index):
        # Per-html-name nesting counter shared through parser.tag_data.
        tag_data = parser.tag_data
        key = "SimpleTag.%s_nest_level" % self.html_name
        tag_data[key] = tag_data.setdefault(key, 0) + 1
        if tag_data[key] > 1:
            return u""
        return u"<%s>" % self.html_name
    def render_close(self, parser, node_index):
        tag_data = parser.tag_data
        key = "SimpleTag.%s_nest_level" % self.html_name
        tag_data[key] -= 1
        if tag_data[key] > 0:
            return u''
        return u"</%s>" % self.html_name
class DivStyleTag(TagBase):
    """A tag that is replaced by a div carrying one inline css property."""
    def __init__(self, name, style, value, **kwargs):
        TagBase.__init__(self, name)
        # css property name and its fixed value, e.g. 'text-align'/'center'.
        self.style, self.value = style, value
    def render_open(self, parser, node_index):
        return u'<div style="%s:%s;">' % (self.style, self.value)
    def render_close(self, parser, node_index):
        return u'</div>'
class LinkTag(TagBase):
    """Renders [url]/[link] as an anchor with a sanitised target.

    Only http/https/ftp schemes are allowed, javascript: urls are dropped
    entirely, and render_close may append a domain annotation (see the
    module-level annotate_link hook).
    """
    _safe_chars = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                            'abcdefghijklmnopqrstuvwxyz'
                            '0123456789'
                            '_.-=/&?:%&#')
    _re_domain = re.compile(r"//([a-z0-9-\.]*)", re.UNICODE)
    def __init__(self, name, annotate_links=True, **kwargs):
        TagBase.__init__(self, name, inline=True)
        self.annotate_links = annotate_links
    def render_open(self, parser, node_index):
        self.domain = u''
        tag_data = parser.tag_data
        nest_level = tag_data['link_nest_level'] = tag_data.setdefault('link_nest_level', 0) + 1
        # Nested links render nothing; only the outermost one wins.
        if nest_level > 1:
            return u""
        if self.params:
            url = self.params.strip()
        else:
            url = self.get_contents_text(parser).strip()
        url = _unescape(url)
        self.domain = ""
        # Reject scripting urls outright.
        if u"javascript:" in url.lower():
            return ""
        if ':' not in url:
            url = 'http://' + url
        scheme, uri = url.split(':', 1)
        if scheme not in ['http', 'https', 'ftp']:
            return u''
        # BUGFIX: search() returns None when the uri has no '//<domain>'
        # part (e.g. "http:foo").  The old code only caught IndexError and
        # crashed with AttributeError on .group(); treat no-match as an
        # invalid link instead.
        match = self._re_domain.search(uri.lower())
        if match is None:
            return u''
        domain = match.group(1).lower()
        if domain.startswith('www.'):
            domain = domain[4:]
        def percent_encode(s):
            # Encode every byte outside the whitelist as %XX.
            safe_chars = self._safe_chars
            def replace(c):
                if c not in safe_chars:
                    return "%%%02X" % ord(c)
                else:
                    return c
            return "".join([replace(c) for c in s])
        self.url = percent_encode(url.encode('utf-8', 'replace'))
        self.domain = domain
        if not self.url:
            return u""
        if self.domain:
            return u'<a href="%s">' % self.url
        else:
            return u""
    def render_close(self, parser, node_index):
        tag_data = parser.tag_data
        tag_data['link_nest_level'] -= 1
        if tag_data['link_nest_level'] > 0:
            return u''
        if self.domain:
            # Annotate only when the url came from the tag parameter (the
            # contents already show the url otherwise).
            if self.params:
                return u'</a>' + self.annotate_link(self.domain)
            else:
                return u'</a>'
        else:
            return u''
    def annotate_link(self, domain=None):
        """Return the annotation html for *domain*, or u'' when disabled."""
        if domain and self.annotate_links:
            return annotate_link(domain)
        else:
            return u""
class ThreadLinkTag(LinkTag):
    # [thread=id] -- link to a bulletin-board topic by its numeric id.
    def __init__(self, name, **kwargs):
        LinkTag.__init__(self, name, annotate_links=False)
    def render_open(self, parser, node_index):
        self.threadid = None
        tag_data = parser.tag_data
        nest_level = tag_data['link_nest_level'] = tag_data.setdefault('link_nest_level', 0) + 1
        # Nested links render nothing.
        if nest_level > 1:
            return u''
        try:
            # Topic id comes from the parameter, or from the tag contents.
            if self.params:
                self.threadid = int(self.params.strip())
            else:
                self.threadid = int(self.get_contents_text(parser).strip())
        except ValueError:
            return u''
        # Imported here, presumably to avoid a circular import at module
        # load time -- confirm against pyClanSphere package layout.
        from pyClanSphere.api import url_for
        from pyClanSphere.plugins.bulletin_board.models import Topic
        topic = Topic.query.get(self.threadid)
        if topic is None:
            # Unknown topic: render nothing (close tag checks threadid).
            self.threadid = None
            return u''
        return u'<a href="%s">' % url_for('board/topic_detail', topic_id=self.threadid)
class PostLinkTag(LinkTag):
    # [post=id] -- link to a bulletin-board post by its numeric id.
    def __init__(self, name, **kwargs):
        LinkTag.__init__(self, name, annotate_links=False)
    def render_open(self, parser, node_index):
        self.postid = None
        tag_data = parser.tag_data
        nest_level = tag_data['link_nest_level'] = tag_data.setdefault('link_nest_level', 0) + 1
        # Nested links render nothing.
        if nest_level > 1:
            return u''
        try:
            # Post id comes from the parameter, or from the tag contents.
            if self.params:
                self.postid = int(self.params.strip())
            else:
                self.postid = int(self.get_contents_text(parser).strip())
        except ValueError:
            return u''
        # Imported here, presumably to avoid a circular import at module
        # load time -- confirm against pyClanSphere package layout.
        from pyClanSphere.api import url_for
        from pyClanSphere.plugins.bulletin_board.models import Post
        post = Post.query.get(self.postid)
        if post is None:
            # Unknown post: render nothing (close tag checks postid).
            self.postid = None
            return u''
        return u'<a href="%s">' % url_for('board/post_find', post_id=self.postid)
class QuoteTag(TagBase):
    """[quote] / [quote=author] -- render a blockquote, optionally with an
    attribution line."""
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, strip_first_newline=True)
    # NOTE: the previous open()/close() overrides were pure pass-throughs
    # to TagBase and have been removed as dead code.
    def render_open(self, parser, node_index):
        if self.params:
            # Attribution is escaped through the standard replace table.
            return u'<blockquote><em>%s</em><br/>' % (PostMarkup.standard_replace(self.params))
        else:
            return u'<blockquote>'
    def render_close(self, parser, node_index):
        return u"</blockquote>"
class SearchTag(TagBase):
    """Renders a search-engine link, e.g. [google]term[/google].

    url -- template with one %s slot that receives the url-quoted term
    label -- annotation label appended after the closing anchor
    """
    def __init__(self, name, url, label="", annotate_links=True, **kwargs):
        TagBase.__init__(self, name, inline=True)
        self.url = url
        self.label = label
        self.annotate_links = annotate_links
    # FIX: parameter was misspelled `node_idex`; renamed for consistency
    # with every other tag's render_open signature (called positionally).
    def render_open(self, parser, node_index):
        # Search term comes from the parameter or the raw tag contents.
        if self.params:
            search = self.params
        else:
            search = self.get_contents(parser)
        link = u'<a href="%s">' % self.url
        if u'%' in link:
            # Substitute the url-quoted term into the template.
            return link % quote_plus(search.encode("UTF-8"))
        else:
            return link
    def render_close(self, parser, node_index):
        if self.label:
            if self.annotate_links:
                return u'</a>' + annotate_link(self.label)
            else:
                return u'</a>'
        else:
            return u''
class PygmentsCodeTag(TagBase):
    # [code=lang] rendered through Pygments syntax highlighting.
    def __init__(self, name, pygments_line_numbers=False, **kwargs):
        TagBase.__init__(self, name, enclosed=True, strip_first_newline=True)
        self.line_numbers = pygments_line_numbers
    def render_open(self, parser, node_index):
        contents = self.get_contents(parser).strip('\n')
        self.skip_contents(parser)
        try:
            # self.params is the requested language name.
            lexer = get_lexer_by_name(self.params)
        except ClassNotFound:
            # Unknown language: fall back to a plain escaped <pre> block.
            contents = _escape(contents)
            return '<div class="code"><pre>%s</pre></div>' % contents
        formatter = HtmlFormatter(linenos=self.line_numbers, cssclass="code")
        hcontents = highlight(contents, lexer, formatter)
        # Flatten Pygments' newlines into explicit breaks.
        hcontents = hcontents.strip().replace('\n', '<br>')
        return hcontents
class CodeTag(TagBase):
    """[code] without Pygments: escaped contents inside a <pre> block."""
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, enclosed=True, strip_first_newline=True)
    def render_open(self, parser, node_index):
        escaped = _escape_no_breaks(self.get_contents(parser))
        self.skip_contents(parser)
        html_body = escaped.replace("\n", "<br/>")
        return '<div class="code"><pre>%s</pre></div>' % html_body
class NoparseTag(TagBase):
    """[noparse] -- emit contents escaped, with bbcode processing disabled."""
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, enclosed=True, strip_first_newline=True)
    def render_open(self, parser, node_index):
        raw = _escape_no_breaks(self.get_contents(parser))
        self.skip_contents(parser)
        return raw
class ImgTag(TagBase):
    # [img] -- embeds an image; the tag contents are the src url.
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, inline=True)
    def render_open(self, parser, node_index):
        contents = self.get_contents(parser)
        self.skip_contents(parser)
        # NOTE(review): only double quotes are neutralised here; other
        # characters flow into the src attribute unescaped -- verify that
        # upstream escaping makes this safe against attribute injection.
        contents = strip_bbcode(contents).replace(u'"', "%22")
        return u'<img src="%s"></img>' % contents
class ListTag(TagBase):
    """[list] / [list=1|a|A] -- ordered or unordered html lists.

    Nested [list] tags are suppressed (render nothing).
    """
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, strip_first_newline=True)
    # NOTE: the previous open()/close() overrides were pure pass-throughs
    # to TagBase and have been removed as dead code.
    def render_open(self, parser, node_index):
        self.close_tag = u""
        tag_data = parser.tag_data
        tag_data.setdefault("ListTag.count", 0)
        if tag_data["ListTag.count"]:
            # Suppressed nested list: remember that this instance did NOT
            # open one, so render_close won't decrement the counter.
            self._opened_list = False
            return u""
        # BUGFIX: the counter used to be decremented unconditionally in
        # render_close, so a suppressed nested list drove it negative and
        # every subsequent [list] was then silently swallowed.
        self._opened_list = True
        tag_data["ListTag.count"] += 1
        tag_data["ListItemTag.initial_item"] = True
        if self.params == "1":
            self.close_tag = u"</li></ol>"
            return u"<ol><li>"
        elif self.params == "a":
            self.close_tag = u"</li></ol>"
            return u'<ol style="list-style-type: lower-alpha;"><li>'
        elif self.params == "A":
            self.close_tag = u"</li></ol>"
            return u'<ol style="list-style-type: upper-alpha;"><li>'
        else:
            self.close_tag = u"</li></ul>"
            return u"<ul><li>"
    def render_close(self, parser, node_index):
        tag_data = parser.tag_data
        if getattr(self, '_opened_list', False):
            tag_data["ListTag.count"] -= 1
        return self.close_tag
class ListItemTag(TagBase):
    """[*] -- list item inside a [list]; closes the previous item."""
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name)
        self.closed = False
    def render_open(self, parser, node_index):
        tag_data = parser.tag_data
        # Outside of any [list], an item renders nothing.
        if not tag_data.setdefault("ListTag.count", 0):
            return u""
        if tag_data["ListItemTag.initial_item"]:
            # The [list] open already emitted the first <li>.
            tag_data["ListItemTag.initial_item"] = False
            # BUGFIX: was a bare `return` (None); return an empty string
            # for consistency with every other render path.
            return u""
        return u"</li><li>"
class SizeTag(TagBase):
    """[size=N] -- render a span with a clamped pixel font-size."""
    valid_chars = frozenset("0123456789")
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, inline=True)
    def render_open(self, parser, node_index):
        # Keep only digit characters from the parameter, then parse.
        digits = "".join(c for c in self.params if c in self.valid_chars)
        try:
            self.size = int(digits)
        except ValueError:
            self.size = None
        if self.size is None:
            return u""
        self.size = self.validate_size(self.size)
        return u'<span style="font-size:%spx">' % self.size
    def render_close(self, parser, node_index):
        return u"" if self.size is None else u'</span>'
    def validate_size(self, size):
        """Clamp the requested size to the inclusive range [4, 64]."""
        return max(4, min(64, size))
class ColorTag(TagBase):
    """[color=X] -- render a span with a character-whitelisted colour."""
    valid_chars = frozenset("#0123456789abcdefghijklmnopqrstuvwxyz")
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, inline=True)
    def render_open(self, parser, node_index):
        allowed = self.valid_chars
        try:
            first_word = self.params.split()[0].lower()
        except IndexError:
            # Empty parameter: no colour.
            self.color = None
        else:
            self.color = "".join(c for c in first_word if c in allowed)
        if not self.color:
            return u""
        return u'<span style="color:%s">' % self.color
    def render_close(self, parser, node_index):
        return u'</span>' if self.color else u''
class FontTag(TagBase):
    """[font=name] -- render a span with a character-whitelisted font-family."""
    valid_chars = frozenset("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ -")
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, inline=True)
    def render_open(self, parser, node_index):
        allowed = self.valid_chars
        try:
            requested = self.params.split()[0]
        except IndexError:
            # Empty parameter: no font.
            self.fontfamily = None
        else:
            self.fontfamily = "".join(c for c in requested if c in allowed)
        if not self.fontfamily:
            return u''
        return u'<span style="font-family:%s">' % self.fontfamily
    def render_close(self, parser, node_index):
        return u'</span>' if self.fontfamily else u''
class LeftTag(DivStyleTag):
    # [left] -- left-aligned block.
    def __init__(self, name, **kwargs):
        DivStyleTag.__init__(self, name, 'text-align', 'left')
class RightTag(DivStyleTag):
    # [right] -- right-aligned block.
    def __init__(self, name, **kwargs):
        DivStyleTag.__init__(self, name, 'text-align', 'right')
class CenterTag(DivStyleTag):
    # [center] -- centered block.
    def __init__(self, name, **kwargs):
        DivStyleTag.__init__(self, name, 'text-align', 'center')
class WarningTag(TagBase):
    # [warning] -- boxed warning callout.
    def render_open(self, parser, node_index, **kwargs):
        return u'<div class="contentwarning"><b>Warning:</b> '
    def render_close(self, parser, node_index):
        return u'</div>'
class NoteTag(TagBase):
    # [note] -- boxed note callout.
    def render_open(self, parser, node_index, **kwargs):
        return u'<div class="contentnote"><b>Note:</b> '
    def render_close(self, parser, node_index):
        return u'</div>'
class ParagraphTag(TagBase):
    """[p] -- paragraph break; closes any open paragraph before opening one.

    The nesting level is tracked in parser.tag_data so at most one
    paragraph is ever open at a time.
    """
    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, inline=True)
    def render_open(self, parser, node_index, **kwargs):
        tag_data = parser.tag_data
        level = tag_data.setdefault('ParagraphTag.level', 0)
        pieces = []
        if level > 0:
            # Close the currently open paragraph first.
            pieces.append(u'</p>\n')
            tag_data['ParagraphTag.level'] -= 1
        pieces.append(u'<p>')
        tag_data['ParagraphTag.level'] += 1
        return u''.join(pieces)
    def render_close(self, parser, node_index):
        tag_data = parser.tag_data
        if not tag_data.setdefault('ParagraphTag.level', 0):
            return u''
        tag_data['ParagraphTag.level'] -= 1
        return u'</p>'
class SectionTag(TagBase):
    """A specialised tag that stores its contents in a dictionary. Can be
    used to define extra contents areas.
    """

    def __init__(self, name, **kwargs):
        TagBase.__init__(self, name, enclosed=True, strip_first_newline=True)

    def render_open(self, parser, node_index):
        """Capture the enclosed contents into tag_data['output']['sections']
        (keyed by the normalised section name) and render nothing inline."""
        section_name = self.params.strip().lower().replace(u' ', u'_')
        self.section_name = section_name
        contents = self.get_contents(parser)
        # Skip the enclosed nodes -- they are stored, not rendered.
        self.skip_contents(parser)
        sections = parser.tag_data['output'].setdefault('sections', {})
        sections.setdefault(section_name, []).append(contents)
        return u''
# http://effbot.org/zone/python-replace.htm
class MultiReplace:
    """Performs several literal string replacements in a single pass.

    Builds one alternation regex matching every key of *repl_dict* and
    substitutes each match with its mapped value.
    """

    def __init__(self, repl_dict):
        # string to string mapping; use a regular expression.
        # Keys are sorted in reverse order so that longer keys sharing a
        # prefix (e.g. '---' vs '--') come first in the alternation and
        # therefore win. sorted() (rather than calling .sort() on
        # dict.keys()) also works on Python 3, where keys() is a view
        # without a sort method.
        keys = sorted(repl_dict.keys(), reverse=True)
        pattern = u"|".join([re.escape(key) for key in keys])
        self.pattern = re.compile(pattern)
        self.dict = repl_dict

    def replace(self, s):
        """Return *s* with every occurrence of a key replaced by its value."""
        # apply replacement dictionary to string
        def repl(match, get=self.dict.get):
            item = match.group(0)
            return get(item, item)
        return self.pattern.sub(repl, s)

    __call__ = replace
def _escape(s):
    """Apply PostMarkup.standard_replace to *s*, with trailing newlines
    stripped first."""
    return PostMarkup.standard_replace(s.rstrip('\n'))

# Public alias for _escape.
escape = _escape
def _escape_no_breaks(s):
    """Like _escape, but uses the no-break replacement table (newlines are
    left intact rather than converted)."""
    return PostMarkup.standard_replace_no_break(s.rstrip('\n'))
def _unescape(s):
    """Reverse PostMarkup.standard_replace on *s*."""
    return PostMarkup.standard_unreplace(s)
# Matches a double-quoted span, e.g. "hello" (non-greedy).
_re_dquotes = re.compile(r'''".*?"''')
# Matches a single-quoted span preceded by whitespace, e.g. 'hello'.
_re_squotes = re.compile(r"\s'(.+?)'")
def _cosmetic_replace(s):
    """Apply 'smart' typography to *s*: PostMarkup's cosmetic replacement
    table, then curly double quotes, then curly single quotes."""
    s = PostMarkup.cosmetic_replace(s)
    # Replace "..." with curly double quotes (keeps the inner text).
    s = _re_dquotes.sub(lambda match: "“%s”" % match.group(0)[1:-1], s)
    # Replace whitespace-preceded '...' with curly single quotes.
    s = _re_squotes.sub(lambda match: " ‘%s’" % match.group(1), s)
    return s
class TagFactory(object):
    """Registry that creates fresh tag instances by name."""

    def __init__(self):
        # Maps tag name -> zero-argument callable producing a new instance.
        self.tags = {}

    @classmethod
    def tag_factory_callable(cls, tag_class, name, *args, **kwargs):
        """
        Returns a callable that returns a new tag instance.
        """
        def instantiate():
            return tag_class(name, *args, **kwargs)
        return instantiate

    def add_tag(self, cls, name, *args, **kwargs):
        """Register tag class *cls* under *name*."""
        self.tags[name] = self.tag_factory_callable(cls, name, *args, **kwargs)

    def __getitem__(self, name):
        """Create and return a new instance of the tag called *name*."""
        return self.tags[name]()

    def __contains__(self, name):
        return name in self.tags

    def get(self, name, default=None):
        """Like __getitem__, but returns *default* for unknown names."""
        maker = self.tags.get(name)
        if maker is None:
            return default
        return maker()
class _Parser(object):
""" This is an interface to the parser, used by Tag classes. """
def __init__(self, post_markup, tag_data=None):
self.pm = post_markup
if tag_data is None:
self.tag_data = {}
else:
self.tag_data = tag_data
self.render_node_index = 0
def skip_to_node(self, node_index):
""" Skips to a node, ignoring intermediate nodes. """
assert node_index is not None, "Node index must be non-None"
self.render_node_index = node_index
def get_text_nodes(self, node1, node2):
""" Retrieves the text nodes between two node indices. """
if node2 is None:
node2 = node1+1
return [node for node in self.nodes[node1:node2] if not callable(node)]
def begin_no_breaks(self):
"""Disables replacing of newlines with break tags at the start and end of text nodes.
Can only be called from a tags 'open' method.
"""
assert self.phase==1, "Can not be called from render_open or render_close"
self.no_breaks_count += 1
def end_no_breaks(self):
"""Re-enables auto-replacing of newlines with break tags (see begin_no_breaks)."""
assert self.phase==1, "Can not be called from render_open or render_close"
if self.no_breaks_count:
self.no_breaks_count -= 1
class PostMarkup(object):
    """Renders bbcode post markup as XHTML."""

    # Escapes characters that are special in html and converts newlines to
    # break tags. FIX: these tables previously mapped each character to
    # itself (entity-unescaping corruption), which disabled html escaping
    # entirely -- an html-injection hazard. Restored to proper entities.
    standard_replace = MultiReplace({u'<': u'&lt;',
                                     u'>': u'&gt;',
                                     u'&': u'&amp;',
                                     u'\n': u'<br/>'})

    # Reverses standard_replace (break tags are not reversed).
    standard_unreplace = MultiReplace({u'&lt;': u'<',
                                       u'&gt;': u'>',
                                       u'&amp;': u'&'})

    # As standard_replace, but leaves newlines untouched (used when
    # paragraph handling consumes the newlines itself).
    standard_replace_no_break = MultiReplace({u'<': u'&lt;',
                                              u'>': u'&gt;',
                                              u'&': u'&amp;'})

    # Typographic niceties. Longer sequences win because MultiReplace
    # sorts keys in reverse order ('---' is tried before '--').
    cosmetic_replace = MultiReplace({u'--': u'–',
                                     u'---': u'—',
                                     u'...': u'…',
                                     u'(c)': u'©',
                                     u'(reg)': u'®',
                                     u'(tm)': u'™'})

    TOKEN_TAG, TOKEN_PTAG, TOKEN_TEXT = range(3)

    # ']' or '=' -- the two characters that can end a tag name.
    _re_end_eq = re.compile(u"\]|\=", re.UNICODE)
    # '"' or ']' -- terminators for a (possibly quoted) attribute value.
    _re_quote_end = re.compile(u'\"|\]', re.UNICODE)

    # I tried to use RE's. Really I did.
    @classmethod
    def tokenize(cls, post):
        """Scan *post* and yield (token_type, token_text, start_pos, end_pos)
        tuples, where token_type is TOKEN_TEXT, TOKEN_TAG (simple or
        unquoted-attribute tag) or TOKEN_PTAG (quoted-attribute tag)."""
        re_end_eq = cls._re_end_eq
        re_quote_end = cls._re_quote_end
        pos = 0

        def find_first(post, pos, re_ff):
            # Position of the first match of re_ff at/after pos, or -1.
            try:
                return re_ff.search(post, pos).start()
            except AttributeError:
                return -1

        TOKEN_TAG, TOKEN_PTAG, TOKEN_TEXT = range(3)
        post_find = post.find
        while True:
            brace_pos = post_find(u'[', pos)
            if brace_pos == -1:
                # No more tags; flush any trailing text.
                if pos < len(post):
                    yield TOKEN_TEXT, post[pos:], pos, len(post)
                return
            if brace_pos - pos > 0:
                yield TOKEN_TEXT, post[pos:brace_pos], pos, brace_pos
            pos = brace_pos
            end_pos = pos + 1
            open_tag_pos = post_find(u'[', end_pos)
            end_pos = find_first(post, end_pos, re_end_eq)
            if end_pos == -1:
                # Unterminated tag; treat the rest as text.
                yield TOKEN_TEXT, post[pos:], pos, len(post)
                return
            if open_tag_pos != -1 and open_tag_pos < end_pos:
                # A new '[' starts before this tag closes -- the first '['
                # was literal text.
                yield TOKEN_TEXT, post[pos:open_tag_pos], pos, open_tag_pos
                end_pos = open_tag_pos
                pos = end_pos
                continue
            if post[end_pos] == ']':
                # Simple tag with no attribute.
                yield TOKEN_TAG, post[pos:end_pos + 1], pos, end_pos + 1
                pos = end_pos + 1
                continue
            if post[end_pos] == '=':
                try:
                    end_pos += 1
                    while post[end_pos] == ' ':
                        end_pos += 1
                    if post[end_pos] != '"':
                        # Unquoted attribute, runs to the closing ']'.
                        end_pos = post_find(u']', end_pos + 1)
                        if end_pos == -1:
                            return
                        yield TOKEN_TAG, post[pos:end_pos + 1], pos, end_pos + 1
                    else:
                        end_pos = find_first(post, end_pos, re_quote_end)
                        if end_pos == -1:
                            return
                        if post[end_pos] == '"':
                            # Quoted attribute; find the closing quote then
                            # the closing ']'.
                            end_pos = post_find(u'"', end_pos + 1)
                            if end_pos == -1:
                                return
                            end_pos = post_find(u']', end_pos + 1)
                            if end_pos == -1:
                                return
                            yield TOKEN_PTAG, post[pos:end_pos + 1], pos, end_pos + 1
                        else:
                            # NOTE(review): this branch reports end_pos (not
                            # end_pos + 1) as the token end, unlike every
                            # other branch -- confirm whether intentional.
                            yield TOKEN_TAG, post[pos:end_pos + 1], pos, end_pos
                    pos = end_pos + 1
                except IndexError:
                    # Markup ran off the end of the string mid-tag.
                    return

    def add_tag(self, cls, name, *args, **kwargs):
        """Register a tag class with this renderer's tag factory."""
        return self.tag_factory.add_tag(cls, name, *args, **kwargs)

    def tagify_urls(self, postmarkup):
        """ Surrounds urls with url bbcode tags. """
        def repl(match):
            return u'[url]%s[/url]' % match.group(0)

        text_tokens = []
        TOKEN_TEXT = PostMarkup.TOKEN_TEXT
        # Only wrap urls that occur in text tokens, never inside tags.
        for tag_type, tag_token, start_pos, end_pos in self.tokenize(postmarkup):
            if tag_type == TOKEN_TEXT:
                text_tokens.append(_re_url.sub(repl, tag_token))
            else:
                text_tokens.append(tag_token)
        return u"".join(text_tokens)

    def __init__(self, tag_factory=None):
        self.tag_factory = tag_factory or TagFactory()

    def default_tags(self):
        """ Add some basic tags. """
        add_tag = self.tag_factory.add_tag
        add_tag(SimpleTag, u'b', u'strong')
        add_tag(SimpleTag, u'i', u'em')
        add_tag(SimpleTag, u'u', u'u')
        add_tag(SimpleTag, u's', u's')

    def get_supported_tags(self):
        """ Returns a list of the supported tags. """
        return sorted(self.tag_factory.tags.keys())

    def insert_paragraphs(self, post_markup):
        """Inserts paragraph tags in place of newlines. A more complex task
        than it may seem -- Multiple newlines result in just one paragraph
        tag, and paragraph tags aren't inserted inside certain other tags
        (such as the code tag). Returns a postmarkup string.

        post_markup -- A string containing the raw postmarkup
        """
        parts = [u'[p]']
        tag_factory = self.tag_factory
        enclosed_count = 0
        TOKEN_TEXT = PostMarkup.TOKEN_TEXT
        TOKEN_TAG = PostMarkup.TOKEN_TAG
        for tag_type, tag_token, start_pos, end_pos in self.tokenize(post_markup):
            if tag_type == TOKEN_TEXT:
                if enclosed_count:
                    # Inside an enclosed tag (e.g. [code]): pass through raw.
                    parts.append(post_markup[start_pos:end_pos])
                else:
                    txt = post_markup[start_pos:end_pos]
                    # Collapse each run of blank lines into one [p].
                    txt = _re_break_groups.sub(u'[p]', txt)
                    parts.append(txt)
                continue
            elif tag_type == TOKEN_TAG:
                tag_token = tag_token[1:-1].lstrip()
                if ' ' in tag_token:
                    tag_name = tag_token.split(u' ', 1)[0]
                else:
                    if '=' in tag_token:
                        tag_name = tag_token.split(u'=', 1)[0]
                    else:
                        tag_name = tag_token
            else:
                # TOKEN_PTAG: [name="quoted value"]
                tag_token = tag_token[1:-1].lstrip()
                tag_name = tag_token.split(u'=', 1)[0]
            tag_name = tag_name.strip().lower()

            end_tag = False
            if tag_name.startswith(u'/'):
                end_tag = True
                tag_name = tag_name[1:]

            # Track nesting of 'enclosed' tags so paragraphs are not
            # inserted inside them.
            tag = tag_factory.get(tag_name, None)
            if tag is not None and tag.enclosed:
                if end_tag:
                    enclosed_count -= 1
                else:
                    enclosed_count += 1
            parts.append(post_markup[start_pos:end_pos])

        new_markup = u"".join(parts)
        return new_markup

    # Matches simple blank tags containing only whitespace
    _re_blank_tags = re.compile(r"\<(\w+?)\>\s*\</\1\>")

    @classmethod
    def cleanup_html(cls, html):
        """Cleans up html. Currently only removes blank tags, i.e. tags
        containing only whitespace. Only applies to tags without attributes.
        Tag removal is done recursively until there are no more blank tags.
        So <strong><em></em></strong> would be completely removed.

        html -- A string containing (X)HTML
        """
        original_html = ''
        # Repeat until a pass makes no change (handles nested blank tags).
        while original_html != html:
            original_html = html
            html = cls._re_blank_tags.sub(u"", html)
        html = _re_break_groups.sub(u"\n", html)
        return html

    def render_to_html(self,
                       post_markup,
                       encoding="utf8",
                       exclude_tags=None,
                       auto_urls=True,
                       paragraphs=False,
                       clean=True,
                       cosmetic_replace=True,
                       tag_data=None):
        """Converts post markup (ie. bbcode) to XHTML. This method is
        threadsafe, by virtue that the state is entirely stored on the stack.

        post_markup -- String containing bbcode.
        encoding -- Encoding of string, defaults to "utf8" if the string is
            not already unicode.
        exclude_tags -- A collection of tag names to ignore.
        auto_urls -- If True, then urls will be wrapped with url bbcode tags.
        paragraphs -- If True then line breaks will be replaced with
            paragraph tags, rather than break tags.
        clean -- If True, html will be run through the cleanup_html method.
        cosmetic_replace -- If True, then some 'smart' quotes will be
            enabled, in addition to replacing some character sequences with
            html entities.
        tag_data -- An optional dictionary to store tag data in. The default
            of None will create a dictionary internally. Set this to your
            own dictionary if you want to retrieve information from the Tag
            Classes.
        """
        if not isinstance(post_markup, unicode):
            post_markup = unicode(post_markup, encoding, 'replace')

        if auto_urls:
            post_markup = self.tagify_urls(post_markup)

        # Normalise line endings before any newline-sensitive processing.
        post_markup = post_markup.replace('\r\n', '\n')
        if paragraphs:
            post_markup = self.insert_paragraphs(post_markup)

        parser = _Parser(self, tag_data=tag_data)
        parser.tag_data.setdefault("output", {})
        parser.markup = post_markup

        if exclude_tags is None:
            exclude_tags = []

        tag_factory = self.tag_factory

        nodes = []
        parser.nodes = nodes
        parser.phase = 1
        parser.no_breaks_count = 0

        enclosed_count = 0
        tag_stack = []
        break_stack = []
        remove_next_newline = False

        def standard_replace(s):
            # Escape html and convert newlines, with optional typography.
            s = self.standard_replace(s)
            if cosmetic_replace:
                s = _cosmetic_replace(s)
            return s

        def standard_replace_no_break(s):
            # Escape html but preserve newlines (paragraph mode).
            s = self.standard_replace_no_break(s)
            if cosmetic_replace:
                s = _cosmetic_replace(s)
            return s

        def check_tag_stack(tag_name):
            # True if a tag with this name is open anywhere on the stack.
            for tag in reversed(tag_stack):
                if tag_name == tag.name:
                    return True
            return False

        def redo_break_stack():
            # Re-open inline tags previously broken by break_inline_tags.
            while break_stack:
                tag = break_stack.pop()
                open_tag(tag)
                tag_stack.append(tag)

        def break_inline_tags():
            # Close open inline tags (a block-level tag is about to start);
            # they are remembered on break_stack for re-opening.
            while tag_stack:
                if tag_stack[-1].inline:
                    tag = tag_stack.pop()
                    close_tag(tag)
                    break_stack.append(tag)
                else:
                    break

        def open_tag(tag):
            def call(node_index):
                if paragraphs and not isinstance(tag, ParagraphTag):
                    if not tag.inline:
                        # A block-level tag implicitly closes any open
                        # paragraph.
                        tag_data = parser.tag_data
                        level = tag_data.get('ParagraphTag.level', 0)
                        if level:
                            tag_data['ParagraphTag.level'] = 0
                            return "</p>" + (tag.render_open(parser, node_index) or "")
                    return tag.render_open(parser, node_index)
                else:
                    return tag.render_open(parser, node_index)
            nodes.append(call)

        def close_tag(tag):
            def call(node_index):
                return tag.render_close(parser, node_index)
            nodes.append(call)

        TOKEN_TEXT = PostMarkup.TOKEN_TEXT
        TOKEN_TAG = PostMarkup.TOKEN_TAG

        # Pass 1: tokenize the markup into a flat node list. Text becomes
        # strings; tags become deferred-render callables.
        for tag_type, tag_token, start_pos, end_pos in self.tokenize(post_markup):
            raw_tag_token = tag_token

            if tag_type == TOKEN_TEXT:
                if parser.no_breaks_count:
                    tag_token = tag_token.rstrip()
                    if not tag_token.strip():
                        continue
                if remove_next_newline:
                    # Swallow the newline that directly follows a closed
                    # block-level tag.
                    tag_token = tag_token.lstrip(' ')
                    if tag_token.startswith('\n'):
                        tag_token = tag_token.lstrip(' ')[1:]
                        if not tag_token:
                            continue
                    remove_next_newline = False
                if tag_stack and tag_stack[-1].strip_first_newline:
                    tag_token = tag_token.lstrip()
                    tag_stack[-1].strip_first_newline = False
                    # NOTE(review): this tests the truthiness of the tag
                    # object, not of tag_token -- looks suspicious; confirm
                    # the intended condition before changing.
                    if not tag_stack[-1]:
                        tag_stack.pop()
                        continue
                if not enclosed_count:
                    redo_break_stack()
                    if paragraphs:
                        nodes.append(standard_replace_no_break(tag_token))
                    else:
                        nodes.append(standard_replace(tag_token))
                continue

            elif tag_type == TOKEN_TAG:
                tag_token = tag_token[1:-1].lstrip()
                if ' ' in tag_token:
                    tag_name, tag_attribs = tag_token.split(u' ', 1)
                    tag_attribs = tag_attribs.strip()
                else:
                    if '=' in tag_token:
                        tag_name, tag_attribs = tag_token.split(u'=', 1)
                        tag_attribs = tag_attribs.strip()
                    else:
                        tag_name = tag_token
                        tag_attribs = u""
            else:
                # TOKEN_PTAG: [name="quoted value"] -- strip the quotes.
                tag_token = tag_token[1:-1].lstrip()
                tag_name, tag_attribs = tag_token.split(u'=', 1)
                tag_attribs = tag_attribs.strip()[1:-1]

            tag_name = tag_name.strip().lower()

            end_tag = False
            if tag_name.startswith(u'/'):
                end_tag = True
                tag_name = tag_name[1:]

            # Inside an enclosed tag (e.g. [code]) everything except the
            # matching end tag is ignored here (the raw text is recovered
            # by the tag itself from parser.markup).
            if enclosed_count and tag_stack[-1].name != tag_name:
                continue

            if tag_name in exclude_tags:
                continue

            if not end_tag:
                tag = tag_factory.get(tag_name, None)
                if tag is None:
                    continue
                redo_break_stack()
                if not tag.inline:
                    break_inline_tags()
                tag.open(parser, tag_attribs, end_pos, len(nodes))
                if tag.enclosed:
                    enclosed_count += 1
                tag_stack.append(tag)
                open_tag(tag)
                if tag.auto_close:
                    tag = tag_stack.pop()
                    # NOTE(review): other close() calls receive `parser`;
                    # this one receives `self` (the PostMarkup instance) --
                    # looks inconsistent; confirm before changing.
                    tag.close(self, start_pos, len(nodes) - 1)
                    close_tag(tag)
            else:
                if break_stack and break_stack[-1].name == tag_name:
                    # FIX: bind the popped tag before closing it. Previously
                    # the pop result was discarded and a stale `tag` from an
                    # earlier iteration was closed (NameError on first use).
                    tag = break_stack.pop()
                    tag.close(parser, start_pos, len(nodes))
                elif check_tag_stack(tag_name):
                    # Close overlapped inline tags down to the matching one;
                    # they go on break_stack to be re-opened afterwards.
                    while tag_stack[-1].name != tag_name:
                        tag = tag_stack.pop()
                        break_stack.append(tag)
                        close_tag(tag)
                    tag = tag_stack.pop()
                    tag.close(parser, start_pos, len(nodes))
                    if tag.enclosed:
                        enclosed_count -= 1
                    close_tag(tag)
                    if not tag.inline:
                        remove_next_newline = True

        # End of input: close everything still open.
        if tag_stack:
            redo_break_stack()
        while tag_stack:
            tag = tag_stack.pop()
            tag.close(parser, len(post_markup), len(nodes))
            if tag.enclosed:
                enclosed_count -= 1
            close_tag(tag)

        parser.phase = 2
        # Pass 2: render the node list. Tags may adjust render_node_index
        # (e.g. skip_contents), so iterate via the parser's index.
        parser.nodes = nodes

        text = []
        parser.render_node_index = 0
        while parser.render_node_index < len(parser.nodes):
            i = parser.render_node_index
            node_text = parser.nodes[i]
            if callable(node_text):
                node_text = node_text(i)
            if node_text is not None:
                text.append(node_text)
            parser.render_node_index += 1

        html = u"".join(text)
        if clean:
            html = self.cleanup_html(html)
        return html

    # A shortcut for render_to_html
    __call__ = render_to_html
# Module-level renderer instance used by the render_bbcode shortcut below.
_postmarkup = create(use_pygments=pygments_available)
def render_bbcode(bbcode,
                  encoding="ascii",
                  exclude_tags=None,
                  auto_urls=True,
                  paragraphs=False,
                  clean=True,
                  tag_data=None):
    """ Renders a bbcode string in to XHTML. This is a shortcut if you don't
    need to customize any tags.

    bbcode -- String containing bbcode.
    encoding -- Encoding of string, used if the string is not already
        unicode (defaults to "ascii").
    exclude_tags -- A collection of tag names to ignore.
    auto_urls -- If True, then urls will be wrapped with url bbcode tags.
    paragraphs -- If True then line breaks will be replaced with paragraph
        tags, rather than break tags.
    clean -- If True, html will be run through a cleanup_html method.
    tag_data -- An optional dictionary to store tag data in. The default of
        None will create a dictionary internally.
    """
    options = dict(exclude_tags=exclude_tags,
                   auto_urls=auto_urls,
                   paragraphs=paragraphs,
                   clean=clean,
                   tag_data=tag_data)
    return _postmarkup(bbcode, encoding, **options)
def _tests():
    """Render a battery of bbcode snippets to stdout as html.

    Development aid for visual inspection (Python 2 only -- uses print
    statements); not an automated test suite. Note that `tests` is reset
    near the end, so only the final snippet is actually rendered.
    """
    import sys
    #sys.stdout=open('test.htm', 'w')

    post_markup = create(use_pygments=True)

    tests = []
    print """<link rel="stylesheet" href="code.css" type="text/css" />\n"""

    tests.append(']')
    tests.append('[')
    tests.append(':-[ Hello, [b]World[/b]')

    tests.append("[link=http://www.willmcgugan.com]My homepage[/link]")
    tests.append('[link="http://www.willmcgugan.com"]My homepage[/link]')
    tests.append("[link http://www.willmcgugan.com]My homepage[/link]")
    tests.append("[link]http://www.willmcgugan.com[/link]")

    tests.append(u"[b]Hello André[/b]")
    tests.append(u"[google]André[/google]")
    tests.append("[s]Strike through[/s]")
    tests.append("[b]bold [i]bold and italic[/b] italic[/i]")
    tests.append("[google]Will McGugan[/google]")
    tests.append("[wiki Will McGugan]Look up my name in Wikipedia[/wiki]")

    tests.append("[quote Will said...]BBCode is very cool[/quote]")

    tests.append("""[code python]
# A proxy object that calls a callback when converted to a string
class TagStringify(object):
    def __init__(self, callback, raw):
        self.callback = callback
        self.raw = raw
        r[b]=3
    def __str__(self):
        return self.callback()
    def __repr__(self):
        return self.__str__()
[/code]""")

    tests.append(u"[img]http://upload.wikimedia.org/wikipedia/commons"\
        "/6/61/Triops_longicaudatus.jpg[/img]")

    tests.append("[list][*]Apples[*]Oranges[*]Pears[/list]")
    tests.append("""[list=1]
[*]Apples
[*]Oranges
are not the only fruit
[*]Pears
[/list]""")
    tests.append("[list=a][*]Apples[*]Oranges[*]Pears[/list]")
    tests.append("[list=A][*]Apples[*]Oranges[*]Pears[/list]")

    long_test="""[b]Long test[/b]
New lines characters are converted to breaks."""\
"""Tags my be [b]ove[i]rl[/b]apped[/i].
[i]Open tags will be closed.
[b]Test[/b]"""
    tests.append(long_test)

    tests.append("[dict]Will[/dict]")

    tests.append("[code unknownlanguage]10 print 'In yr code'; 20 goto 10[/code]")

    tests.append("[url=http://www.google.com/coop/cse?cx=006850030468302103399%3Amqxv78bdfdo]CakePHP Google Groups[/url]")
    tests.append("[url=http://www.google.com/search?hl=en&safe=off&client=opera&rls=en&hs=pO1&q=python+bbcode&btnG=Search]Search for Python BBCode[/url]")
    #tests = []
    # Attempt to inject html in to unicode
    tests.append("[url=http://www.test.com/sfsdfsdf/ter?t=\"></a><h1>HACK</h1><a>\"]Test Hack[/url]")

    tests.append('Nested urls, i.e. [url][url]www.becontrary.com[/url][/url], are condensed in to a single tag.')

    tests.append(u'[google]ɸβfvθðsz[/google]')

    tests.append(u'[size 30]Hello, World![/size]')

    tests.append(u'[color red]This should be red[/color]')
    tests.append(u'[color #0f0]This should be green[/color]')
    tests.append(u"[center]This should be in the center!")

    tests.append('Nested urls, i.e. [url][url]www.becontrary.com[/url][/url], are condensed in to a single tag.')

    #tests = []
    tests.append('[b]Hello, [i]World[/b]! [/i]')

    tests.append('[b][center]This should be centered![/center][/b]')

    tests.append('[list][*]Hello[i][*]World![/i][/list]')

    tests.append("""[list=1]
[*]Apples
[*]Oranges
are not the only fruit
[*]Pears
[/list]""")

    tests.append("[b]urls such as http://www.willmcgugan.com are authomaticaly converted to links[/b]")

    tests.append("""
[b]
[code python]
parser.markup[self.open_pos:self.close_pos]
[/code]
asdasdasdasdqweqwe
""")

    tests.append("""[list 1]
[*]Hello
[*]World
[/list]""")

    #tests = []
    tests.append("[b][p]Hello, [p]World")
    tests.append("[p][p][p]")

    tests.append("http://www.google.com/search?as_q=bbcode&btnG=%D0%9F%D0%BE%D0%B8%D1%81%D0%BA")

    #tests=["""[b]b[i]i[/b][/i]"""]

    # NOTE: resets the list -- only the following snippet is rendered.
    tests = []
    tests.append("[code python] import this[/code]")

    for test in tests:
        print u"<pre>%s</pre>"%str(test.encode("ascii", "xmlcharrefreplace"))
        print u"<p>%s</p>"%str(post_markup(test, paragraphs=True).encode("ascii", "xmlcharrefreplace"))
        print u"<hr/>"
        print

    #print repr(post_markup('[url=<script>Attack</script>]Attack[/url]'))
    #print repr(post_markup('http://www.google.com/search?as_q=%D0%9F%D0%BE%D0%B8%D1%81%D0%BA&test=hai'))

    #p = create(use_pygments=False)
    #print (p('[code]foo\nbar[/code]'))

    #print render_bbcode("[b]For the lazy, use the http://www.willmcgugan.com render_bbcode function.[/b]")

    # Demonstrate SectionTag: content is diverted into tag_data['output'].
    smarkup = create()
    smarkup.add_tag(SectionTag, 'section')

    test = """Hello, World.[b][i]This in italics
[section sidebar]This is the [b]sidebar[/b][/section]
[section footer]
This is the footer
[/section]
More text"""

    print smarkup(test, paragraphs=True, clean=False)
    tag_data = {}
    print smarkup(test, tag_data=tag_data, paragraphs=True, clean=True)
    print tag_data
def _run_unittests():
    """Run the small unittest-based regression suite for the renderer."""
    # TODO: Expand tests for better coverage!
    import unittest

    class TestPostmarkup(unittest.TestCase):
        """Regression tests for cleanup, simple tags, overlap and links."""

        def testcleanuphtml(self):
            # Blank-tag removal is recursive; attribute-bearing tags survive.
            postmarkup = create()
            tests = [("""\n<p>\n </p>\n""", ""),
                     ("""<b>\n\n<i> </i>\n</b>Test""", "Test"),
                     ("""<p id="test">Test</p>""", """<p id="test">Test</p>"""),]
            for test, result in tests:
                self.assertEqual(PostMarkup.cleanup_html(test).strip(), result)

        def testsimpletag(self):
            postmarkup = create()
            tests= [ ('[b]Hello[/b]', "<strong>Hello</strong>"),
                     ('[i]Italic[/i]', "<em>Italic</em>"),
                     ('[s]Strike[/s]', "<strike>Strike</strike>"),
                     ('[u]underlined[/u]', "<u>underlined</u>"),
                     ]
            for test, result in tests:
                self.assertEqual(postmarkup(test), result)

        def testoverlap(self):
            # Overlapped tags are broken and re-opened to keep valid nesting.
            postmarkup = create()
            tests= [ ('[i][b]Hello[/i][/b]', "<em><strong>Hello</strong></em>"),
                     ('[b]bold [u]both[/b] underline[/u]', '<strong>bold <u>both</u></strong><u> underline</u>')
                     ]
            for test, result in tests:
                self.assertEqual(postmarkup(test), result)

        def testlinks(self):
            postmarkup = create(annotate_links=False)
            tests= [ ('[link=http://www.willmcgugan.com]blog1[/link]', '<a href="http://www.willmcgugan.com">blog1</a>'),
                     ('[link="http://www.willmcgugan.com"]blog2[/link]', '<a href="http://www.willmcgugan.com">blog2</a>'),
                     ('[link http://www.willmcgugan.com]blog3[/link]', '<a href="http://www.willmcgugan.com">blog3</a>'),
                     ('[link]http://www.willmcgugan.com[/link]', '<a href="http://www.willmcgugan.com">http://www.willmcgugan.com</a>')
                     ]
            for test, result in tests:
                self.assertEqual(postmarkup(test), result)

    suite = unittest.TestLoader().loadTestsFromTestCase(TestPostmarkup)
    unittest.TextTestRunner(verbosity=2).run(suite)
def _ff_test():
    """Micro-benchmark comparing two ways to locate the first of two
    characters: two str.find calls vs a compiled regex (Python 2 only).

    NOTE(review): ff2 ignores *pos* and returns the matched text
    (match.group(0)) rather than an index, so the two functions do not
    compute the same thing -- treat the timing comparison with caution.
    """
    def ff1(post, pos, c1, c2):
        # Two str.find calls; return the smaller valid position.
        f1 = post.find(c1, pos)
        f2 = post.find(c2, pos)
        if f1 == -1:
            return f2
        if f2 == -1:
            return f1
        return min(f1, f2)

    re_ff=re.compile('a|b', re.UNICODE)
    def ff2(post, pos, c1, c2):
        try:
            return re_ff.search(post).group(0)
        except AttributeError:
            # search() returned None -- no match.
            return -1

    text = u"sdl;fk;sdlfks;dflksd;flksdfsdfwerwerwgwegwegwegwegwegegwweggewwegwegwegwettttttttttttttttttttttttttttttttttgggggggggg;slbdfkwelrkwelrkjal;sdfksdl;fksdf;lb"
    REPEAT = 100000

    from time import time

    start = time()
    for n in xrange(REPEAT):
        ff1(text, 0, "a", "b")
    end = time()
    print end - start

    start = time()
    for n in xrange(REPEAT):
        ff2(text, 0, "a", "b")
    end = time()
    print end - start
if __name__ == "__main__":
    # Quick manual smoke test (Python 2). Uncomment the calls below to run
    # the fuller demo / unittest / benchmark routines.
    #_tests()
    #_run_unittests()
    #print _cosmetic_replace(''' "Hello, World!"... -- and --- more 'single quotes'! sdfsdf''')
    t = """http://www.willmcgugan.com#comment5
"""
    print render_bbcode(t)
    #_ff_test()
| {
"content_hash": "fb0b3c21ba746aa5296bc8a5bd3c69da",
"timestamp": "",
"source": "github",
"line_count": 1721,
"max_line_length": 168,
"avg_line_length": 29.829169087739686,
"alnum_prop": 0.5423484494311984,
"repo_name": "jokey2k/pyClanSphere",
"id": "0bb29cdf1e785d36a94522dab195163bb86ae340",
"size": "51367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyClanSphere/_ext/postmarkup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "188174"
},
{
"name": "Python",
"bytes": "891594"
}
],
"symlink_target": ""
} |
# Entry-point script: start the Flask application on all interfaces,
# using the port configured in config.PORT (coerced to int in case the
# setting is a string, e.g. from an environment variable).
from app import app
from config import PORT

app.run(host='0.0.0.0', port=int(PORT), debug=False)
| {
"content_hash": "8a4593e148079e501772e941a345ff90",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 52,
"avg_line_length": 24.5,
"alnum_prop": 0.7244897959183674,
"repo_name": "PradheepShrinivasan/picourl",
"id": "274d195de04e9606f16bd13b4617625a7f6103ad",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "309"
},
{
"name": "HTML",
"bytes": "7637"
},
{
"name": "Makefile",
"bytes": "455"
},
{
"name": "Python",
"bytes": "37033"
}
],
"symlink_target": ""
} |
import operator
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator # noqa
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_post_parameters # noqa
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.users \
import forms as project_forms
from openstack_dashboard.dashboards.admin.users \
import tables as project_tables
class IndexView(tables.DataTableView):
    """Admin table of keystone users, scoped to the session's domain."""

    table_class = project_tables.UsersTable
    template_name = 'admin/users/index.html'

    def get_data(self):
        """Return the user list, or an empty list if retrieval fails."""
        domain_context = self.request.session.get('domain_context', None)
        try:
            return api.keystone.user_list(self.request,
                                          domain=domain_context)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve user list.'))
        return []
class UpdateView(forms.ModalFormView):
    """Modal form view for editing an existing keystone user."""

    form_class = project_forms.UpdateUserForm
    template_name = 'admin/users/update.html'
    success_url = reverse_lazy('horizon:admin:users:index')

    @method_decorator(sensitive_post_parameters('password',
                                                'confirm_password'))
    def dispatch(self, *args, **kwargs):
        # Password fields are marked sensitive so they are scrubbed from
        # debug/error output.
        return super(UpdateView, self).dispatch(*args, **kwargs)

    @memoized.memoized_method
    def get_object(self):
        """Fetch the user being edited (memoized per request)."""
        user_id = self.kwargs['user_id']
        try:
            return api.keystone.user_get(self.request, user_id, admin=True)
        except Exception:
            redirect = reverse("horizon:admin:users:index")
            exceptions.handle(self.request,
                              _('Unable to update user.'),
                              redirect=redirect)

    def get_context_data(self, **kwargs):
        context = super(UpdateView, self).get_context_data(**kwargs)
        context['user'] = self.get_object()
        return context

    def get_initial(self):
        """Seed the form with the user's current attributes."""
        user = self.get_object()
        domain_id = getattr(user, "domain_id", None)
        domain_name = ''
        # Retrieve the name of the domain the user belongs to (keystone v3
        # only; v2 has no domains).
        if api.keystone.VERSIONS.active >= 3:
            try:
                domain = api.keystone.domain_get(self.request, domain_id)
                domain_name = domain.name
            except Exception:
                exceptions.handle(self.request,
                                  _('Unable to retrieve project domain.'))
        return {'domain_id': domain_id,
                'domain_name': domain_name,
                'id': user.id,
                'first_name': getattr(user, 'first_name', None),
                'project': user.project_id,
                'email': getattr(user, 'email', None)}
class CreateView(forms.ModalFormView):
    """Modal form view for creating a new keystone user."""

    form_class = project_forms.CreateUserForm
    template_name = 'admin/users/create.html'
    success_url = reverse_lazy('horizon:admin:users:index')

    @method_decorator(sensitive_post_parameters('password',
                                                'confirm_password'))
    def dispatch(self, *args, **kwargs):
        # Password fields are marked sensitive so they are scrubbed from
        # debug/error output.
        return super(CreateView, self).dispatch(*args, **kwargs)

    def get_form_kwargs(self):
        """Extend the form kwargs with the id-sorted role list."""
        kwargs = super(CreateView, self).get_form_kwargs()
        try:
            roles = api.keystone.role_list(self.request)
        except Exception:
            redirect = reverse("horizon:admin:users:index")
            exceptions.handle(self.request,
                              _("Unable to retrieve user roles."),
                              redirect=redirect)
        kwargs['roles'] = sorted(roles, key=operator.attrgetter("id"))
        return kwargs

    def get_initial(self):
        """Seed the form with the default domain and role."""
        domain = api.keystone.get_default_domain(self.request)
        default_role = api.keystone.get_default_role(self.request)
        return {'domain_id': domain.id,
                'domain_name': domain.name,
                'role_id': getattr(default_role, "id", None)}
| {
"content_hash": "0d6c7895e9c436d02b9da581f4ff2390",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 37.94736842105263,
"alnum_prop": 0.5968562182154415,
"repo_name": "JioCloud/horizon",
"id": "ee8f3dd37299d87582866bb3b79236eeb9a38290",
"size": "5090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/users/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "296932"
},
{
"name": "JavaScript",
"bytes": "713370"
},
{
"name": "Python",
"bytes": "3614755"
},
{
"name": "Shell",
"bytes": "15387"
}
],
"symlink_target": ""
} |
import tempfile
from typing import Any, TypeVar
from azure.cli.core import AzCommandsLoader
from azure.cli.core._config import ENV_VAR_PREFIX
from azure.cli.core.cloud import get_active_cloud
from azure.cli.core.commands import AzCliCommand
from knack import CLI
# Throwaway config directory so tests never touch the real CLI config.
MOCK_CLI_CONFIG_DIR = tempfile.mkdtemp()
# Env-var prefix namespaced to avoid clashing with a real CLI environment.
MOCK_CLI_ENV_VAR_PREFIX = "MOCK_" + ENV_VAR_PREFIX

# type variables
ManagedCluster = TypeVar("ManagedCluster")
class MockClient:
    """Do-nothing stand-in for a managed-cluster service client.

    Every operation is a no-op that returns None.
    """

    def __init__(self):
        pass

    def get(self):
        """No-op; returns None."""

    def begin_create_or_update(
        self, resource_group_name: str, resource_name: str, parameters: ManagedCluster, **kwargs: Any
    ):
        """No-op; returns None."""
class MockCLI(CLI):
    """Knack CLI instance wired to an isolated config dir and env prefix."""

    def __init__(self):
        # Zero-argument super() -- this is a Python 3 module.
        super().__init__(
            cli_name="mock_cli",
            config_dir=MOCK_CLI_CONFIG_DIR,
            config_env_var_prefix=MOCK_CLI_ENV_VAR_PREFIX,
        )
        self.cloud = get_active_cloud(self)
class MockCmd:
    """Minimal command object exposing the AzCliCommand bits tests need."""

    def __init__(self, cli_ctx):
        self.cli_ctx = cli_ctx
        # Real AzCliCommand so version/model lookups behave authentically.
        self.cmd = AzCliCommand(AzCommandsLoader(cli_ctx), "mock-cmd", None)

    def supported_api_version(
        self,
        resource_type=None,
        min_api=None,
        max_api=None,
        operation_group=None,
        parameter_name=None,
    ):
        """Delegate the API-version check to the wrapped command."""
        options = dict(
            resource_type=resource_type,
            min_api=min_api,
            max_api=max_api,
            operation_group=operation_group,
            parameter_name=parameter_name,
        )
        return self.cmd.supported_api_version(**options)

    def get_models(self, *attr_args, **kwargs):
        """Delegate SDK model lookup to the wrapped command."""
        return self.cmd.get_models(*attr_args, **kwargs)
class MockUrlretrieveUrlValidator(object):
    """Equality helper for asserting urlretrieve call arguments.

    Compares equal to any string that starts with *url* and contains
    *version* somewhere.
    """

    def __init__(self, url, version):
        self.url = url
        self.version = version

    def __eq__(self, other):
        if not other.startswith(self.url):
            return False
        return self.version in other
| {
"content_hash": "d772db2b63b30807844fae09366c02e8",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 101,
"avg_line_length": 26.267605633802816,
"alnum_prop": 0.6262734584450402,
"repo_name": "yugangw-msft/azure-cli",
"id": "1d001294767e26bf107f88d7a309e7356941f645",
"size": "2211",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/acs/tests/latest/mocks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared msrest serializer for building request URLs/parameters.
_SERIALIZER = Serializer()
# Client-side validation is disabled; the service enforces constraints.
_SERIALIZER.client_side_validation = False
# fmt: off
def build_get_child_by_experiment_name_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a GET request listing the child runs of ``run_id``, with the
    parent experiment addressed by name.

    Optional paging/filter options are popped from ``kwargs``: ``filter``,
    ``continuationtoken``, ``orderby``, ``sortorder``, ``top`` and ``count``;
    each is sent as the corresponding ``$``-prefixed query parameter when set.
    Remaining ``kwargs`` are forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    # NOTE: local name mirrors the query parameter and shadows the builtin
    # ``filter`` — generated-code convention, kept as-is.
    filter = kwargs.pop('filter', None)  # type: Optional[str]
    continuationtoken = kwargs.pop('continuationtoken', None)  # type: Optional[str]
    orderby = kwargs.pop('orderby', None)  # type: Optional[List[str]]
    sortorder = kwargs.pop('sortorder', None)  # type: Optional[Union[str, "_models.SortOrderDirection"]]
    top = kwargs.pop('top', None)  # type: Optional[int]
    count = kwargs.pop('count', None)  # type: Optional[bool]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/children")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        _query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    if continuationtoken is not None:
        _query_parameters['$continuationtoken'] = _SERIALIZER.query("continuationtoken", continuationtoken, 'str')
    if orderby is not None:
        _query_parameters['$orderby'] = _SERIALIZER.query("orderby", orderby, '[str]')
    if sortorder is not None:
        _query_parameters['$sortorder'] = _SERIALIZER.query("sortorder", sortorder, 'str')
    if top is not None:
        _query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
    if count is not None:
        _query_parameters['$count'] = _SERIALIZER.query("count", count, 'bool')

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        **kwargs
    )
def build_get_child_by_experiment_id_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a GET request listing the child runs of ``run_id``, with the
    parent experiment addressed by id.

    Optional paging/filter options are popped from ``kwargs``: ``filter``,
    ``continuationtoken``, ``orderby``, ``sortorder``, ``top`` and ``count``.
    Remaining ``kwargs`` are forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    # NOTE: local name mirrors the query parameter and shadows the builtin
    # ``filter`` — generated-code convention, kept as-is.
    filter = kwargs.pop('filter', None)  # type: Optional[str]
    continuationtoken = kwargs.pop('continuationtoken', None)  # type: Optional[str]
    orderby = kwargs.pop('orderby', None)  # type: Optional[List[str]]
    sortorder = kwargs.pop('sortorder', None)  # type: Optional[Union[str, "_models.SortOrderDirection"]]
    top = kwargs.pop('top', None)  # type: Optional[int]
    count = kwargs.pop('count', None)  # type: Optional[bool]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/children")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentId": _SERIALIZER.url("experiment_id", experiment_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        _query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    if continuationtoken is not None:
        _query_parameters['$continuationtoken'] = _SERIALIZER.query("continuationtoken", continuationtoken, 'str')
    if orderby is not None:
        _query_parameters['$orderby'] = _SERIALIZER.query("orderby", orderby, '[str]')
    if sortorder is not None:
        _query_parameters['$sortorder'] = _SERIALIZER.query("sortorder", sortorder, 'str')
    if top is not None:
        _query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
    if count is not None:
        _query_parameters['$count'] = _SERIALIZER.query("count", count, 'bool')

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        **kwargs
    )
def build_get_child_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a GET request listing the child runs of ``run_id`` (no experiment
    in the path).

    Optional paging/filter options are popped from ``kwargs``: ``filter``,
    ``continuationtoken``, ``orderby``, ``sortorder``, ``top`` and ``count``.
    Remaining ``kwargs`` are forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    # NOTE: local name mirrors the query parameter and shadows the builtin
    # ``filter`` — generated-code convention, kept as-is.
    filter = kwargs.pop('filter', None)  # type: Optional[str]
    continuationtoken = kwargs.pop('continuationtoken', None)  # type: Optional[str]
    orderby = kwargs.pop('orderby', None)  # type: Optional[List[str]]
    sortorder = kwargs.pop('sortorder', None)  # type: Optional[Union[str, "_models.SortOrderDirection"]]
    top = kwargs.pop('top', None)  # type: Optional[int]
    count = kwargs.pop('count', None)  # type: Optional[bool]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/children")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        _query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    if continuationtoken is not None:
        _query_parameters['$continuationtoken'] = _SERIALIZER.query("continuationtoken", continuationtoken, 'str')
    if orderby is not None:
        _query_parameters['$orderby'] = _SERIALIZER.query("orderby", orderby, '[str]')
    if sortorder is not None:
        _query_parameters['$sortorder'] = _SERIALIZER.query("sortorder", sortorder, 'str')
    if top is not None:
        _query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
    if count is not None:
        _query_parameters['$count'] = _SERIALIZER.query("count", count, 'bool')

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        **kwargs
    )
def build_get_details_by_experiment_id_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a GET request for the details of ``run_id``, with the experiment
    addressed by id. Remaining ``kwargs`` are forwarded to ``HttpRequest``.
    """
    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/details")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentId": _SERIALIZER.url("experiment_id", experiment_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_get_details_by_experiment_name_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a GET request for the details of ``run_id``, with the experiment
    addressed by name. Remaining ``kwargs`` are forwarded to ``HttpRequest``.
    """
    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/details")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_get_details_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a GET request for the details of ``run_id`` (no experiment in the
    path). Remaining ``kwargs`` are forwarded to ``HttpRequest``.
    """
    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/details")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_get_run_data_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a POST request against the workspace ``rundata`` endpoint.

    ``content_type`` may be supplied via ``kwargs`` and becomes the
    ``Content-Type`` header; the request body itself is attached by the
    caller. Remaining ``kwargs`` are forwarded to ``HttpRequest``.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/rundata")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_batch_get_run_data_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a POST request against the workspace ``batchrundata`` endpoint.

    ``content_type`` may be supplied via ``kwargs``; the request body is
    attached by the caller. Remaining ``kwargs`` are forwarded to
    ``HttpRequest``.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchrundata")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_batch_add_or_modify_by_experiment_id_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    experiment_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a PATCH request that adds/modifies a batch of runs under the
    experiment addressed by id.

    ``content_type`` may be supplied via ``kwargs``; the request body is
    attached by the caller. Remaining ``kwargs`` are forwarded to
    ``HttpRequest``.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/batch/runs")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "experimentId": _SERIALIZER.url("experiment_id", experiment_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_batch_add_or_modify_by_experiment_name_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    experiment_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a PATCH request that adds/modifies a batch of runs under the
    experiment addressed by name.

    ``content_type`` may be supplied via ``kwargs``; the request body is
    attached by the caller. Remaining ``kwargs`` are forwarded to
    ``HttpRequest``.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/batch/runs")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_add_or_modify_by_experiment_name_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a PATCH request that adds or modifies run ``run_id`` under the
    experiment addressed by name.

    ``run_id`` is validated against the service's run-id pattern (alphanumeric
    start, then up to 255 word/dash characters). ``content_type`` may be
    supplied via ``kwargs``; the body is attached by the caller.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str', pattern=r'^[a-zA-Z0-9][\w-]{0,255}$'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_get_by_experiment_name_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a GET request for run ``run_id``, with the experiment addressed
    by name. Remaining ``kwargs`` are forwarded to ``HttpRequest``.
    """
    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_add_or_modify_by_experiment_id_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a PATCH request that adds or modifies run ``run_id`` under the
    experiment addressed by id.

    ``run_id`` is validated against the service's run-id pattern.
    ``content_type`` may be supplied via ``kwargs``; the body is attached
    by the caller.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str', pattern=r'^[a-zA-Z0-9][\w-]{0,255}$'),
        "experimentId": _SERIALIZER.url("experiment_id", experiment_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_get_by_experiment_id_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a GET request for run ``run_id``, with the experiment addressed
    by id. Remaining ``kwargs`` are forwarded to ``HttpRequest``.
    """
    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentId": _SERIALIZER.url("experiment_id", experiment_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_add_or_modify_experiment_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a PATCH request that adds or modifies run ``run_id`` (no
    experiment in the path).

    ``run_id`` is validated against the service's run-id pattern.
    ``content_type`` may be supplied via ``kwargs``; the body is attached
    by the caller.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str', pattern=r'^[a-zA-Z0-9][\w-]{0,255}$'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_add_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a POST request that creates run ``run_id``.

    Same URL as :func:`build_add_or_modify_experiment_request` but with the
    POST verb. ``run_id`` is validated against the service's run-id pattern.
    ``content_type`` may be supplied via ``kwargs``; the body is attached by
    the caller.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str', pattern=r'^[a-zA-Z0-9][\w-]{0,255}$'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_get_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a GET request for run ``run_id`` (no experiment in the path).
    Remaining ``kwargs`` are forwarded to ``HttpRequest``.
    """
    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_delete_tags_by_experiment_id_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a DELETE request for the tags of run ``run_id``, with the
    experiment addressed by id.

    ``content_type`` may be supplied via ``kwargs`` (the delete payload
    naming the tags is attached by the caller).
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/tags")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentId": _SERIALIZER.url("experiment_id", experiment_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_modify_or_delete_tags_by_experiment_id_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a PATCH request that modifies or deletes tags of run ``run_id``,
    with the experiment addressed by id.

    ``content_type`` may be supplied via ``kwargs``; the patch payload is
    attached by the caller.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/tags")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentId": _SERIALIZER.url("experiment_id", experiment_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_delete_tags_by_experiment_name_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a DELETE request for the tags of run ``run_id``, with the
    experiment addressed by name.

    ``content_type`` may be supplied via ``kwargs`` (the delete payload
    naming the tags is attached by the caller).
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/tags")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_modify_or_delete_tags_by_experiment_name_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a PATCH request that modifies or deletes tags of run ``run_id``,
    with the experiment addressed by name.

    ``content_type`` may be supplied via ``kwargs``; the patch payload is
    attached by the caller.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/tags")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_delete_tags_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build a DELETE request for the tags of run ``run_id`` (no experiment in
    the path).

    ``content_type`` may be supplied via ``kwargs`` (the delete payload
    naming the tags is attached by the caller).
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/tags")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runId": _SERIALIZER.url("run_id", run_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=_url,
        headers=_header_parameters,
        **kwargs
    )
def build_delete_run_services_by_experiment_id_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    run_id, # type: str
    experiment_id, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the DELETE request for a run's services, addressing the run by
    experiment id.

    Path parameters are serialized into the URL template; an optional
    ``content_type`` kwarg becomes the Content-Type header. Remaining
    ``kwargs`` are forwarded verbatim to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/services")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        runId=_SERIALIZER.url("run_id", run_id, 'str'),
        experimentId=_SERIALIZER.url("experiment_id", experiment_id, 'str'),
    )

    # Start from caller-supplied headers, then set Content-Type/Accept.
    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="DELETE", url=url, headers=headers, **kwargs)
def build_delete_run_services_by_experiment_name_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    run_id, # type: str
    experiment_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the DELETE request for a run's services, addressing the run by
    experiment name.

    Path parameters are serialized into the URL template; an optional
    ``content_type`` kwarg becomes the Content-Type header. Remaining
    ``kwargs`` are forwarded verbatim to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/services")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        runId=_SERIALIZER.url("run_id", run_id, 'str'),
        experimentName=_SERIALIZER.url("experiment_name", experiment_name, 'str'),
    )

    # Start from caller-supplied headers, then set Content-Type/Accept.
    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="DELETE", url=url, headers=headers, **kwargs)
def build_delete_run_services_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    run_id, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the DELETE request for a run's services collection.

    Path parameters are serialized into the URL template; an optional
    ``content_type`` kwarg becomes the Content-Type header. Remaining
    ``kwargs`` are forwarded verbatim to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/services")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        runId=_SERIALIZER.url("run_id", run_id, 'str'),
    )

    # Start from caller-supplied headers, then set Content-Type/Accept.
    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="DELETE", url=url, headers=headers, **kwargs)
def build_add_or_modify_run_service_instances_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    run_id, # type: str
    node_id, # type: int
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the PATCH request for the service instances of one run node.

    Path parameters (``node_id`` serialized as an int) go into the URL
    template; an optional ``content_type`` kwarg becomes the Content-Type
    header. Remaining ``kwargs`` are forwarded to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/serviceinstances/{nodeId}")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        runId=_SERIALIZER.url("run_id", run_id, 'str'),
        nodeId=_SERIALIZER.url("node_id", node_id, 'int'),
    )

    # Start from caller-supplied headers, then set Content-Type/Accept.
    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="PATCH", url=url, headers=headers, **kwargs)
def build_get_run_service_instances_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    run_id, # type: str
    node_id, # type: int
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the GET request for the service instances of one run node.

    Path parameters (``node_id`` serialized as an int) go into the URL
    template; remaining ``kwargs`` are forwarded to :class:`HttpRequest`.
    """
    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/serviceinstances/{nodeId}")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        runId=_SERIALIZER.url("run_id", run_id, 'str'),
        nodeId=_SERIALIZER.url("node_id", node_id, 'int'),
    )

    # GET sends no body, so only the Accept header is set.
    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="GET", url=url, headers=headers, **kwargs)
def build_get_by_query_by_experiment_name_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    experiment_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the runs:query endpoint of an experiment,
    addressed by experiment name.

    Path parameters are serialized into the URL template; an optional
    ``content_type`` kwarg becomes the Content-Type header. Remaining
    ``kwargs`` are forwarded verbatim to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs:query")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        experimentName=_SERIALIZER.url("experiment_name", experiment_name, 'str'),
    )

    # Start from caller-supplied headers, then set Content-Type/Accept.
    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, headers=headers, **kwargs)
def build_get_by_query_by_experiment_id_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    experiment_id, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the runs:query endpoint of an experiment,
    addressed by experiment id.

    Path parameters are serialized into the URL template; an optional
    ``content_type`` kwarg becomes the Content-Type header. Remaining
    ``kwargs`` are forwarded verbatim to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs:query")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        experimentId=_SERIALIZER.url("experiment_id", experiment_id, 'str'),
    )

    # Start from caller-supplied headers, then set Content-Type/Accept.
    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, headers=headers, **kwargs)
def build_get_by_ids_by_experiment_id_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    experiment_id, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the runs/runIds endpoint of an experiment,
    addressed by experiment id.

    Path parameters are serialized into the URL template; an optional
    ``content_type`` kwarg becomes the Content-Type header. Remaining
    ``kwargs`` are forwarded verbatim to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/runIds")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        experimentId=_SERIALIZER.url("experiment_id", experiment_id, 'str'),
    )

    # Start from caller-supplied headers, then set Content-Type/Accept.
    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, headers=headers, **kwargs)
def build_get_by_ids_by_experiment_name_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    experiment_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the runs/runIds endpoint of an experiment,
    addressed by experiment name.

    Path parameters are serialized into the URL template; an optional
    ``content_type`` kwarg becomes the Content-Type header. Remaining
    ``kwargs`` are forwarded verbatim to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/runIds")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        experimentName=_SERIALIZER.url("experiment_name", experiment_name, 'str'),
    )

    # Start from caller-supplied headers, then set Content-Type/Accept.
    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, headers=headers, **kwargs)
def build_cancel_run_with_uri_by_experiment_id_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    run_id, # type: str
    experiment_id, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request that cancels a run, addressing it by
    experiment id.

    An optional ``cancelation_reason`` kwarg is sent as the
    ``CancelationReason`` query parameter; remaining ``kwargs`` are forwarded
    verbatim to :class:`HttpRequest`.
    """
    cancelation_reason = kwargs.pop('cancelation_reason', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/cancel")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        runId=_SERIALIZER.url("run_id", run_id, 'str'),
        experimentId=_SERIALIZER.url("experiment_id", experiment_id, 'str'),
    )

    # Optional cancellation reason travels as a query parameter.
    query = kwargs.pop("params", {}) # type: Dict[str, Any]
    if cancelation_reason is not None:
        query['CancelationReason'] = _SERIALIZER.query("cancelation_reason", cancelation_reason, 'str')

    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, params=query, headers=headers, **kwargs)
def build_cancel_run_with_uri_by_experiment_name_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    run_id, # type: str
    experiment_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request that cancels a run, addressing it by
    experiment name.

    An optional ``cancelation_reason`` kwarg is sent as the
    ``CancelationReason`` query parameter; remaining ``kwargs`` are forwarded
    verbatim to :class:`HttpRequest`.
    """
    cancelation_reason = kwargs.pop('cancelation_reason', None) # type: Optional[str]

    # Serialize the path parameters straight into the URL template.
    template = kwargs.pop("template_url", "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/cancel")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        runId=_SERIALIZER.url("run_id", run_id, 'str'),
        experimentName=_SERIALIZER.url("experiment_name", experiment_name, 'str'),
    )

    # Optional cancellation reason travels as a query parameter.
    query = kwargs.pop("params", {}) # type: Dict[str, Any]
    if cancelation_reason is not None:
        query['CancelationReason'] = _SERIALIZER.query("cancelation_reason", cancelation_reason, 'str')

    headers = kwargs.pop("headers", {}) # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(method="POST", url=url, params=query, headers=headers, **kwargs)
# fmt: on
class RunsOperations(object): # pylint: disable=too-many-public-methods
"""RunsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    @distributed_trace
    def get_child_by_experiment_name(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        run_id, # type: str
        experiment_name, # type: str
        filter=None, # type: Optional[str]
        continuationtoken=None, # type: Optional[str]
        orderby=None, # type: Optional[List[str]]
        sortorder=None, # type: Optional[Union[str, "_models.SortOrderDirection"]]
        top=None, # type: Optional[int]
        count=None, # type: Optional[bool]
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.PaginatedRunList"]
        """get_child_by_experiment_name.

        Lazily page through the child runs of a run, addressing the parent
        run by experiment name. No HTTP call is made until the returned
        ``ItemPaged`` is iterated.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id:
        :type run_id: str
        :param experiment_name:
        :type experiment_name: str
        :param filter: Allows for filtering the collection of resources.
         The expression specified is evaluated for each resource in the collection, and only items
         where the expression evaluates to true are included in the response.
        :type filter: str
        :param continuationtoken: The continuation token to use for getting the next set of resources.
        :type continuationtoken: str
        :param orderby: The list of resource properties to use for sorting the requested resources.
        :type orderby: list[str]
        :param sortorder: The sort order of the returned resources. Not used, specify asc or desc after
         each property name in the OrderBy parameter.
        :type sortorder: str or ~azure.mgmt.machinelearningservices.models.SortOrderDirection
        :param top: The maximum number of items in the resource collection to be included in the
         result.
         If not specified, all items are returned.
        :type top: int
        :param count: Whether to include a count of the matching resources along with the resources
         returned in the response.
        :type count: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PaginatedRunList or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Map auth/not-found/conflict HTTP statuses to azure-core exceptions;
        # callers may extend or override the mapping via the 'error_map' kwarg.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedRunList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; continuation pages
            # rebuild the request from the server-provided next_link.
            if not next_link:
                request = build_get_child_by_experiment_name_request(
                    subscription_id=subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    run_id=run_id,
                    experiment_name=experiment_name,
                    filter=filter,
                    continuationtoken=continuationtoken,
                    orderby=orderby,
                    sortorder=sortorder,
                    top=top,
                    count=count,
                    template_url=self.get_child_by_experiment_name.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_get_child_by_experiment_name_request(
                    subscription_id=subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    run_id=run_id,
                    experiment_name=experiment_name,
                    filter=filter,
                    continuationtoken=continuationtoken,
                    orderby=orderby,
                    sortorder=sortorder,
                    top=top,
                    count=count,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation requests are always GETs regardless of the
                # builder's original verb.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("PaginatedRunList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, translating non-200 responses into
            # HttpResponseError (with the deserialized service error attached).
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_child_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/children"} # type: ignore
    @distributed_trace
    def get_child_by_experiment_id(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        run_id, # type: str
        experiment_id, # type: str
        filter=None, # type: Optional[str]
        continuationtoken=None, # type: Optional[str]
        orderby=None, # type: Optional[List[str]]
        sortorder=None, # type: Optional[Union[str, "_models.SortOrderDirection"]]
        top=None, # type: Optional[int]
        count=None, # type: Optional[bool]
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.PaginatedRunList"]
        """get_child_by_experiment_id.

        Lazily page through the child runs of a run, addressing the parent
        run by experiment id. No HTTP call is made until the returned
        ``ItemPaged`` is iterated.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id:
        :type run_id: str
        :param experiment_id:
        :type experiment_id: str
        :param filter: Allows for filtering the collection of resources.
         The expression specified is evaluated for each resource in the collection, and only items
         where the expression evaluates to true are included in the response.
        :type filter: str
        :param continuationtoken: The continuation token to use for getting the next set of resources.
        :type continuationtoken: str
        :param orderby: The list of resource properties to use for sorting the requested resources.
        :type orderby: list[str]
        :param sortorder: The sort order of the returned resources. Not used, specify asc or desc after
         each property name in the OrderBy parameter.
        :type sortorder: str or ~azure.mgmt.machinelearningservices.models.SortOrderDirection
        :param top: The maximum number of items in the resource collection to be included in the
         result.
         If not specified, all items are returned.
        :type top: int
        :param count: Whether to include a count of the matching resources along with the resources
         returned in the response.
        :type count: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PaginatedRunList or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Map auth/not-found/conflict HTTP statuses to azure-core exceptions;
        # callers may extend or override the mapping via the 'error_map' kwarg.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedRunList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; continuation pages
            # rebuild the request from the server-provided next_link.
            if not next_link:
                request = build_get_child_by_experiment_id_request(
                    subscription_id=subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    run_id=run_id,
                    experiment_id=experiment_id,
                    filter=filter,
                    continuationtoken=continuationtoken,
                    orderby=orderby,
                    sortorder=sortorder,
                    top=top,
                    count=count,
                    template_url=self.get_child_by_experiment_id.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_get_child_by_experiment_id_request(
                    subscription_id=subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    run_id=run_id,
                    experiment_id=experiment_id,
                    filter=filter,
                    continuationtoken=continuationtoken,
                    orderby=orderby,
                    sortorder=sortorder,
                    top=top,
                    count=count,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation requests are always GETs regardless of the
                # builder's original verb.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("PaginatedRunList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, translating non-200 responses into
            # HttpResponseError (with the deserialized service error attached).
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_child_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/children"} # type: ignore
    @distributed_trace
    def get_child(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        run_id, # type: str
        filter=None, # type: Optional[str]
        continuationtoken=None, # type: Optional[str]
        orderby=None, # type: Optional[List[str]]
        sortorder=None, # type: Optional[Union[str, "_models.SortOrderDirection"]]
        top=None, # type: Optional[int]
        count=None, # type: Optional[bool]
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.PaginatedRunList"]
        """get_child.

        Lazily page through the child runs of a run, addressing the parent
        run directly by run id. No HTTP call is made until the returned
        ``ItemPaged`` is iterated.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id:
        :type run_id: str
        :param filter: Allows for filtering the collection of resources.
         The expression specified is evaluated for each resource in the collection, and only items
         where the expression evaluates to true are included in the response.
        :type filter: str
        :param continuationtoken: The continuation token to use for getting the next set of resources.
        :type continuationtoken: str
        :param orderby: The list of resource properties to use for sorting the requested resources.
        :type orderby: list[str]
        :param sortorder: The sort order of the returned resources. Not used, specify asc or desc after
         each property name in the OrderBy parameter.
        :type sortorder: str or ~azure.mgmt.machinelearningservices.models.SortOrderDirection
        :param top: The maximum number of items in the resource collection to be included in the
         result.
         If not specified, all items are returned.
        :type top: int
        :param count: Whether to include a count of the matching resources along with the resources
         returned in the response.
        :type count: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PaginatedRunList or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Map auth/not-found/conflict HTTP statuses to azure-core exceptions;
        # callers may extend or override the mapping via the 'error_map' kwarg.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedRunList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; continuation pages
            # rebuild the request from the server-provided next_link.
            if not next_link:
                request = build_get_child_request(
                    subscription_id=subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    run_id=run_id,
                    filter=filter,
                    continuationtoken=continuationtoken,
                    orderby=orderby,
                    sortorder=sortorder,
                    top=top,
                    count=count,
                    template_url=self.get_child.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_get_child_request(
                    subscription_id=subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    run_id=run_id,
                    filter=filter,
                    continuationtoken=continuationtoken,
                    orderby=orderby,
                    sortorder=sortorder,
                    top=top,
                    count=count,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation requests are always GETs regardless of the
                # builder's original verb.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("PaginatedRunList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, translating non-200 responses into
            # HttpResponseError (with the deserialized service error attached).
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_child.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/children"} # type: ignore
@distributed_trace
def get_details_by_experiment_id(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
experiment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RunDetails"
"""get_details_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_id:
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RunDetails, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.RunDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RunDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_details_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
template_url=self.get_details_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RunDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_details_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/details"} # type: ignore
@distributed_trace
def get_details_by_experiment_name(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
experiment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RunDetails"
"""get_details_by_experiment_name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_name:
:type experiment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RunDetails, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.RunDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RunDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_details_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
template_url=self.get_details_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RunDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_details_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/details"} # type: ignore
@distributed_trace
def get_details(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RunDetails"
"""get_details.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RunDetails, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.RunDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RunDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_details_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
template_url=self.get_details.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RunDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_details.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/details"} # type: ignore
@distributed_trace
def get_run_data(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body=None, # type: Optional["_models.GetRunDataRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.GetRunDataResult"
"""get_run_data.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.GetRunDataRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GetRunDataResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.GetRunDataResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GetRunDataResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'GetRunDataRequest')
else:
_json = None
request = build_get_run_data_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.get_run_data.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('GetRunDataResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_run_data.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/rundata"} # type: ignore
@distributed_trace
def batch_get_run_data(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body=None, # type: Optional["_models.BatchRequest1"]
**kwargs # type: Any
):
# type: (...) -> "_models.BatchResult1"
"""batch_get_run_data.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.BatchRequest1
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchResult1, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchResult1
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchResult1"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'BatchRequest1')
else:
_json = None
request = build_batch_get_run_data_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.batch_get_run_data.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 207]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BatchResult1', pipeline_response)
if response.status_code == 207:
deserialized = self._deserialize('BatchResult1', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
batch_get_run_data.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchrundata"} # type: ignore
@distributed_trace
def batch_add_or_modify_by_experiment_id(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
experiment_id, # type: str
body=None, # type: Optional["_models.BatchAddOrModifyRunRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.BatchRunResult"
"""batch_add_or_modify_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id:
:type experiment_id: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.BatchAddOrModifyRunRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchRunResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'BatchAddOrModifyRunRequest')
else:
_json = None
request = build_batch_add_or_modify_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.batch_add_or_modify_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BatchRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
batch_add_or_modify_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/batch/runs"} # type: ignore
@distributed_trace
def batch_add_or_modify_by_experiment_name(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
experiment_name, # type: str
body=None, # type: Optional["_models.BatchAddOrModifyRunRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.BatchRunResult"
"""batch_add_or_modify_by_experiment_name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_name:
:type experiment_name: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.BatchAddOrModifyRunRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchRunResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'BatchAddOrModifyRunRequest')
else:
_json = None
request = build_batch_add_or_modify_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.batch_add_or_modify_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BatchRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
batch_add_or_modify_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/batch/runs"} # type: ignore
@distributed_trace
def add_or_modify_by_experiment_name(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
experiment_name, # type: str
body=None, # type: Optional["_models.CreateRun"]
**kwargs # type: Any
):
# type: (...) -> "_models.Run"
"""add_or_modify_by_experiment_name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_name:
:type experiment_name: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.CreateRun
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateRun')
else:
_json = None
request = build_add_or_modify_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.add_or_modify_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add_or_modify_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}"} # type: ignore
@distributed_trace
def get_by_experiment_name(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
experiment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Run"
"""get_by_experiment_name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_name:
:type experiment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
template_url=self.get_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}"} # type: ignore
@distributed_trace
def add_or_modify_by_experiment_id(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
experiment_id, # type: str
body=None, # type: Optional["_models.CreateRun"]
**kwargs # type: Any
):
# type: (...) -> "_models.Run"
"""add_or_modify_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_id:
:type experiment_id: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.CreateRun
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateRun')
else:
_json = None
request = build_add_or_modify_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.add_or_modify_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add_or_modify_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}"} # type: ignore
@distributed_trace
def get_by_experiment_id(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
experiment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Run"
"""get_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_id:
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
template_url=self.get_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}"} # type: ignore
@distributed_trace
def add_or_modify_experiment(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
body=None, # type: Optional["_models.CreateRun"]
**kwargs # type: Any
):
# type: (...) -> "_models.Run"
"""add_or_modify_experiment.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.CreateRun
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateRun')
else:
_json = None
request = build_add_or_modify_experiment_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
content_type=content_type,
json=_json,
template_url=self.add_or_modify_experiment.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add_or_modify_experiment.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}"} # type: ignore
@distributed_trace
def add(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
body=None, # type: Optional["_models.CreateRun"]
**kwargs # type: Any
):
# type: (...) -> "_models.Run"
"""add.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.CreateRun
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateRun')
else:
_json = None
request = build_add_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
content_type=content_type,
json=_json,
template_url=self.add.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}"} # type: ignore
@distributed_trace
def get(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Run"
"""get.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}"} # type: ignore
@distributed_trace
def delete_tags_by_experiment_id(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_id,  # type: str
    body=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Run"
    """delete_tags_by_experiment_id.

    Sends the tag-deletion request for a run (addressed by experiment id) and
    returns the service's deserialized ``Run`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param experiment_id:
    :type experiment_id: str
    :param body:
    :type body: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Run, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.Run
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # An absent body is sent with no JSON payload at all.
    _json = self._serialize.body(body, '[str]') if body is not None else None
    req = build_delete_tags_by_experiment_id_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        experiment_id=experiment_id,
        content_type=content_type,
        json=_json,
        template_url=self.delete_tags_by_experiment_id.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('Run', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
delete_tags_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/tags"}  # type: ignore
@distributed_trace
def modify_or_delete_tags_by_experiment_id(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_id,  # type: str
    body=None,  # type: Optional["_models.DeleteOrModifyTags"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Run"
    """modify_or_delete_tags_by_experiment_id.

    Sends a tag modify/delete request for a run (addressed by experiment id)
    and returns the service's deserialized ``Run`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param experiment_id:
    :type experiment_id: str
    :param body:
    :type body: ~azure.mgmt.machinelearningservices.models.DeleteOrModifyTags
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Run, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.Run
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # An absent body is sent with no JSON payload at all.
    _json = self._serialize.body(body, 'DeleteOrModifyTags') if body is not None else None
    req = build_modify_or_delete_tags_by_experiment_id_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        experiment_id=experiment_id,
        content_type=content_type,
        json=_json,
        template_url=self.modify_or_delete_tags_by_experiment_id.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('Run', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
modify_or_delete_tags_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/tags"}  # type: ignore
@distributed_trace
def delete_tags_by_experiment_name(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_name,  # type: str
    body=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Run"
    """delete_tags_by_experiment_name.

    Sends the tag-deletion request for a run (addressed by experiment name)
    and returns the service's deserialized ``Run`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param experiment_name:
    :type experiment_name: str
    :param body:
    :type body: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Run, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.Run
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # An absent body is sent with no JSON payload at all.
    _json = self._serialize.body(body, '[str]') if body is not None else None
    req = build_delete_tags_by_experiment_name_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        experiment_name=experiment_name,
        content_type=content_type,
        json=_json,
        template_url=self.delete_tags_by_experiment_name.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('Run', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
delete_tags_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/tags"}  # type: ignore
@distributed_trace
def modify_or_delete_tags_by_experiment_name(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_name,  # type: str
    body=None,  # type: Optional["_models.DeleteOrModifyTags"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Run"
    """modify_or_delete_tags_by_experiment_name.

    Sends a tag modify/delete request for a run (addressed by experiment name)
    and returns the service's deserialized ``Run`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param experiment_name:
    :type experiment_name: str
    :param body:
    :type body: ~azure.mgmt.machinelearningservices.models.DeleteOrModifyTags
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Run, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.Run
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # An absent body is sent with no JSON payload at all.
    _json = self._serialize.body(body, 'DeleteOrModifyTags') if body is not None else None
    req = build_modify_or_delete_tags_by_experiment_name_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        experiment_name=experiment_name,
        content_type=content_type,
        json=_json,
        template_url=self.modify_or_delete_tags_by_experiment_name.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('Run', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
modify_or_delete_tags_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/tags"}  # type: ignore
@distributed_trace
def delete_tags(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    body=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Run"
    """delete_tags.

    Sends the tag-deletion request for a run and returns the service's
    deserialized ``Run`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param body:
    :type body: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Run, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.Run
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # An absent body is sent with no JSON payload at all.
    _json = self._serialize.body(body, '[str]') if body is not None else None
    req = build_delete_tags_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        content_type=content_type,
        json=_json,
        template_url=self.delete_tags.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('Run', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
delete_tags.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/tags"}  # type: ignore
@distributed_trace
def delete_run_services_by_experiment_id(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_id,  # type: str
    body=None,  # type: Optional["_models.DeleteRunServices"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Run"
    """delete_run_services_by_experiment_id.

    Sends the run-services deletion request for a run (addressed by experiment
    id) and returns the service's deserialized ``Run`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param experiment_id:
    :type experiment_id: str
    :param body:
    :type body: ~azure.mgmt.machinelearningservices.models.DeleteRunServices
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Run, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.Run
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # An absent body is sent with no JSON payload at all.
    _json = self._serialize.body(body, 'DeleteRunServices') if body is not None else None
    req = build_delete_run_services_by_experiment_id_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        experiment_id=experiment_id,
        content_type=content_type,
        json=_json,
        template_url=self.delete_run_services_by_experiment_id.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('Run', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
delete_run_services_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/services"}  # type: ignore
@distributed_trace
def delete_run_services_by_experiment_name(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    experiment_name,  # type: str
    body=None,  # type: Optional["_models.DeleteRunServices"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Run"
    """delete_run_services_by_experiment_name.

    Sends the run-services deletion request for a run (addressed by experiment
    name) and returns the service's deserialized ``Run`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param experiment_name:
    :type experiment_name: str
    :param body:
    :type body: ~azure.mgmt.machinelearningservices.models.DeleteRunServices
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Run, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.Run
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # An absent body is sent with no JSON payload at all.
    _json = self._serialize.body(body, 'DeleteRunServices') if body is not None else None
    req = build_delete_run_services_by_experiment_name_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        experiment_name=experiment_name,
        content_type=content_type,
        json=_json,
        template_url=self.delete_run_services_by_experiment_name.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('Run', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
delete_run_services_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/services"}  # type: ignore
@distributed_trace
def delete_run_services(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    body=None,  # type: Optional["_models.DeleteRunServices"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Run"
    """delete_run_services.

    Sends the run-services deletion request for a run and returns the
    service's deserialized ``Run`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param body:
    :type body: ~azure.mgmt.machinelearningservices.models.DeleteRunServices
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Run, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.Run
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # An absent body is sent with no JSON payload at all.
    _json = self._serialize.body(body, 'DeleteRunServices') if body is not None else None
    req = build_delete_run_services_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        content_type=content_type,
        json=_json,
        template_url=self.delete_run_services.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('Run', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
delete_run_services.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/services"}  # type: ignore
@distributed_trace
def add_or_modify_run_service_instances(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    node_id,  # type: int
    body=None,  # type: Optional["_models.AddOrModifyRunServiceInstancesRequest"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.RunServiceInstances"
    """add_or_modify_run_service_instances.

    Sends the service-instance add/modify request for a run node and returns
    the service's deserialized ``RunServiceInstances`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param node_id:
    :type node_id: int
    :param body:
    :type body: ~azure.mgmt.machinelearningservices.models.AddOrModifyRunServiceInstancesRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RunServiceInstances, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.RunServiceInstances
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RunServiceInstances"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # An absent body is sent with no JSON payload at all.
    _json = (
        self._serialize.body(body, 'AddOrModifyRunServiceInstancesRequest')
        if body is not None else None
    )
    req = build_add_or_modify_run_service_instances_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        node_id=node_id,
        content_type=content_type,
        json=_json,
        template_url=self.add_or_modify_run_service_instances.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('RunServiceInstances', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
add_or_modify_run_service_instances.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/serviceinstances/{nodeId}"}  # type: ignore
@distributed_trace
def get_run_service_instances(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    run_id,  # type: str
    node_id,  # type: int
    **kwargs  # type: Any
):
    # type: (...) -> "_models.RunServiceInstances"
    """get_run_service_instances.

    Fetches the service instances of a run node and returns the service's
    deserialized ``RunServiceInstances`` response.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param run_id:
    :type run_id: str
    :param node_id:
    :type node_id: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RunServiceInstances, or the result of cls(response)
    :rtype: ~azure.mgmt.machinelearningservices.models.RunServiceInstances
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RunServiceInstances"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    req = build_get_run_service_instances_request(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        run_id=run_id,
        node_id=node_id,
        template_url=self.get_run_service_instances.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)
    pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
        req,
        stream=False,
        **kwargs
    )
    http_resp = pipeline_resp.http_response
    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
    result = self._deserialize('RunServiceInstances', pipeline_resp)
    return cls(pipeline_resp, result, {}) if cls else result
get_run_service_instances.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/serviceinstances/{nodeId}"}  # type: ignore
@distributed_trace
def get_by_query_by_experiment_name(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    experiment_name,  # type: str
    body=None,  # type: Optional["_models.QueryParams"]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.PaginatedRunList"]
    """get_by_query_by_experiment_name.

    Queries runs under an experiment (addressed by name) and returns a lazily
    paged iterator over the results.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param experiment_name:
    :type experiment_name: str
    :param body:
    :type body: ~azure.mgmt.machinelearningservices.models.QueryParams
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PaginatedRunList or the result of cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PaginatedRunList"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    def prepare_request(next_link=None):
        # First page goes to the operation's template URL; continuation pages
        # reuse the service-provided next_link and are issued as GET.
        _json = self._serialize.body(body, 'QueryParams') if body is not None else None
        req = build_get_by_query_by_experiment_name_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_name=experiment_name,
            content_type=content_type,
            json=_json,
            template_url=next_link or self.get_by_query_by_experiment_name.metadata['url'],
        )
        req = _convert_request(req)
        req.url = self._client.format_url(req.url)
        if next_link:
            req.method = "GET"
        return req

    def extract_data(pipeline_resp):
        # Pull one page's items plus the continuation token out of the response.
        page = self._deserialize("PaginatedRunList", pipeline_resp)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, iter(items)

    def get_next(next_link=None):
        pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
            prepare_request(next_link),
            stream=False,
            **kwargs
        )
        http_resp = pipeline_resp.http_response
        if http_resp.status_code != 200:
            map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
            raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
        return pipeline_resp

    return ItemPaged(get_next, extract_data)
get_by_query_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs:query"}  # type: ignore
@distributed_trace
def get_by_query_by_experiment_id(
    self,
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    experiment_id,  # type: str
    body=None,  # type: Optional["_models.QueryParams"]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.PaginatedRunList"]
    """get_by_query_by_experiment_id.

    Queries runs under an experiment (addressed by id) and returns a lazily
    paged iterator over the results.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param experiment_id:
    :type experiment_id: str
    :param body:
    :type body: ~azure.mgmt.machinelearningservices.models.QueryParams
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PaginatedRunList or the result of cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PaginatedRunList"]
    # Well-known HTTP failures map to azure-core exception types; callers may
    # extend or override the mapping through the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }

    def prepare_request(next_link=None):
        # First page goes to the operation's template URL; continuation pages
        # reuse the service-provided next_link and are issued as GET.
        _json = self._serialize.body(body, 'QueryParams') if body is not None else None
        req = build_get_by_query_by_experiment_id_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_id=experiment_id,
            content_type=content_type,
            json=_json,
            template_url=next_link or self.get_by_query_by_experiment_id.metadata['url'],
        )
        req = _convert_request(req)
        req.url = self._client.format_url(req.url)
        if next_link:
            req.method = "GET"
        return req

    def extract_data(pipeline_resp):
        # Pull one page's items plus the continuation token out of the response.
        page = self._deserialize("PaginatedRunList", pipeline_resp)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, iter(items)

    def get_next(next_link=None):
        pipeline_resp = self._client._pipeline.run(  # pylint: disable=protected-access
            prepare_request(next_link),
            stream=False,
            **kwargs
        )
        http_resp = pipeline_resp.http_response
        if http_resp.status_code != 200:
            map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
            raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)
        return pipeline_resp

    return ItemPaged(get_next, extract_data)
get_by_query_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs:query"}  # type: ignore
@distributed_trace
def get_by_ids_by_experiment_id(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
experiment_id, # type: str
body=None, # type: Optional["_models.GetRunsByIds"]
**kwargs # type: Any
):
# type: (...) -> "_models.BatchRunResult"
"""get_by_ids_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id:
:type experiment_id: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.GetRunsByIds
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchRunResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'GetRunsByIds')
else:
_json = None
request = build_get_by_ids_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.get_by_ids_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BatchRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_ids_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/runIds"} # type: ignore
@distributed_trace
def get_by_ids_by_experiment_name(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
experiment_name, # type: str
body=None, # type: Optional["_models.GetRunsByIds"]
**kwargs # type: Any
):
# type: (...) -> "_models.BatchRunResult"
"""get_by_ids_by_experiment_name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_name:
:type experiment_name: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.GetRunsByIds
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchRunResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'GetRunsByIds')
else:
_json = None
request = build_get_by_ids_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.get_by_ids_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BatchRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_ids_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/runIds"} # type: ignore
@distributed_trace
def cancel_run_with_uri_by_experiment_id(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
experiment_id, # type: str
cancelation_reason=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.Run"
"""cancel_run_with_uri_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_id:
:type experiment_id: str
:param cancelation_reason:
:type cancelation_reason: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_run_with_uri_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
cancelation_reason=cancelation_reason,
template_url=self.cancel_run_with_uri_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_run_with_uri_by_experiment_id.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/cancel"} # type: ignore
@distributed_trace
def cancel_run_with_uri_by_experiment_name(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
run_id, # type: str
experiment_name, # type: str
cancelation_reason=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.Run"
"""cancel_run_with_uri_by_experiment_name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_name:
:type experiment_name: str
:param cancelation_reason:
:type cancelation_reason: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_run_with_uri_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
cancelation_reason=cancelation_reason,
template_url=self.cancel_run_with_uri_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_run_with_uri_by_experiment_name.metadata = {'url': "/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/cancel"} # type: ignore
| {
"content_hash": "161a9bec71098cf94507a60acc9f9447",
"timestamp": "",
"source": "github",
"line_count": 3964,
"max_line_length": 282,
"avg_line_length": 42.627144298688194,
"alnum_prop": 0.6330145466166393,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a95f5652e08c5d47c24b640721833bec70066941",
"size": "169474",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/runhistory/operations/_runs_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Gauge(_BaseTraceHierarchyType):
    """The gauge of the Indicator plot."""

    # Hierarchy metadata used by plotly's validation machinery.
    _parent_path_str = "indicator"
    _path_str = "indicator.gauge"
    _valid_props = {
        "axis",
        "bar",
        "bgcolor",
        "bordercolor",
        "borderwidth",
        "shape",
        "stepdefaults",
        "steps",
        "threshold",
    }

    # axis
    @property
    def axis(self):
        """
        Axis settings for the gauge.

        The 'axis' property is an instance of Axis and may be specified
        as an instance of
        :class:`plotly.graph_objs.indicator.gauge.Axis` or as a dict of
        string/value properties accepted by the Axis constructor (tick
        placement/formatting options such as dtick, nticks, range,
        tickmode, tickvals, ticktext, visible, ...).

        Returns
        -------
        plotly.graph_objs.indicator.gauge.Axis
        """
        return self["axis"]

    @axis.setter
    def axis(self, val):
        self["axis"] = val

    # bar
    @property
    def bar(self):
        """
        Set the appearance of the gauge's value.

        The 'bar' property is an instance of Bar and may be specified as
        an instance of :class:`plotly.graph_objs.indicator.gauge.Bar` or
        as a dict of string/value properties accepted by the Bar
        constructor (color, line, thickness).

        Returns
        -------
        plotly.graph_objs.indicator.gauge.Bar
        """
        return self["bar"]

    @bar.setter
    def bar(self, val):
        self["bar"] = val

    # bgcolor
    @property
    def bgcolor(self):
        """
        Sets the gauge background color.

        The 'bgcolor' property is a color and may be specified as a hex
        string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or hsv/hsva
        string, or a named CSS color.

        Returns
        -------
        str
        """
        return self["bgcolor"]

    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val

    # bordercolor
    @property
    def bordercolor(self):
        """
        Sets the color of the border enclosing the gauge.

        The 'bordercolor' property is a color and may be specified as a
        hex string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or hsv/hsva
        string, or a named CSS color.

        Returns
        -------
        str
        """
        return self["bordercolor"]

    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val

    # borderwidth
    @property
    def borderwidth(self):
        """
        Sets the width (in px) of the border enclosing the gauge.

        The 'borderwidth' property is a number and may be specified as
        an int or float in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["borderwidth"]

    @borderwidth.setter
    def borderwidth(self, val):
        self["borderwidth"] = val

    # shape
    @property
    def shape(self):
        """
        Set the shape of the gauge.

        The 'shape' property is an enumeration that may be specified as
        one of the values ['angular', 'bullet'].

        Returns
        -------
        Any
        """
        return self["shape"]

    @shape.setter
    def shape(self, val):
        self["shape"] = val

    # steps
    @property
    def steps(self):
        """
        The 'steps' property is a tuple of instances of Step and may be
        specified as a list or tuple of instances of
        :class:`plotly.graph_objs.indicator.gauge.Step`, or of dicts of
        string/value properties accepted by the Step constructor (color,
        line, name, range, templateitemname, thickness).

        Returns
        -------
        tuple[plotly.graph_objs.indicator.gauge.Step]
        """
        return self["steps"]

    @steps.setter
    def steps(self, val):
        self["steps"] = val

    # stepdefaults
    @property
    def stepdefaults(self):
        """
        When used in a template (as
        layout.template.data.indicator.gauge.stepdefaults), sets the
        default property values to use for elements of
        indicator.gauge.steps.

        The 'stepdefaults' property is an instance of Step and may be
        specified as an instance of
        :class:`plotly.graph_objs.indicator.gauge.Step` or as a dict of
        string/value properties accepted by the Step constructor.

        Returns
        -------
        plotly.graph_objs.indicator.gauge.Step
        """
        return self["stepdefaults"]

    @stepdefaults.setter
    def stepdefaults(self, val):
        self["stepdefaults"] = val

    # threshold
    @property
    def threshold(self):
        """
        The 'threshold' property is an instance of Threshold and may be
        specified as an instance of
        :class:`plotly.graph_objs.indicator.gauge.Threshold` or as a
        dict of string/value properties accepted by the Threshold
        constructor (line, thickness, value).

        Returns
        -------
        plotly.graph_objs.indicator.gauge.Threshold
        """
        return self["threshold"]

    @threshold.setter
    def threshold(self, val):
        self["threshold"] = val

    # Self properties description (runtime string — kept verbatim).
    @property
    def _prop_descriptions(self):
        return """\
        axis
            :class:`plotly.graph_objects.indicator.gauge.Axis`
            instance or dict with compatible properties
        bar
            Set the appearance of the gauge's value
        bgcolor
            Sets the gauge background color.
        bordercolor
            Sets the color of the border enclosing the gauge.
        borderwidth
            Sets the width (in px) of the border enclosing the
            gauge.
        shape
            Set the shape of the gauge
        steps
            A tuple of
            :class:`plotly.graph_objects.indicator.gauge.Step`
            instances or dicts with compatible properties
        stepdefaults
            When used in a template (as
            layout.template.data.indicator.gauge.stepdefaults),
            sets the default property values to use for elements of
            indicator.gauge.steps
        threshold
            :class:`plotly.graph_objects.indicator.gauge.Threshold`
            instance or dict with compatible properties
        """

    def __init__(
        self,
        arg=None,
        axis=None,
        bar=None,
        bgcolor=None,
        bordercolor=None,
        borderwidth=None,
        shape=None,
        steps=None,
        stepdefaults=None,
        threshold=None,
        **kwargs
    ):
        """
        Construct a new Gauge object

        The gauge of the Indicator plot.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.indicator.Gauge`
        axis
            :class:`plotly.graph_objects.indicator.gauge.Axis`
            instance or dict with compatible properties
        bar
            Set the appearance of the gauge's value
        bgcolor
            Sets the gauge background color.
        bordercolor
            Sets the color of the border enclosing the gauge.
        borderwidth
            Sets the width (in px) of the border enclosing the
            gauge.
        shape
            Set the shape of the gauge
        steps
            A tuple of
            :class:`plotly.graph_objects.indicator.gauge.Step`
            instances or dicts with compatible properties
        stepdefaults
            When used in a template (as
            layout.template.data.indicator.gauge.stepdefaults),
            sets the default property values to use for elements of
            indicator.gauge.steps
        threshold
            :class:`plotly.graph_objects.indicator.gauge.Threshold`
            instance or dict with compatible properties

        Returns
        -------
        Gauge
        """
        super(Gauge, self).__init__("gauge")

        # Internal shortcut used by plotly when building from a parent object.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain dict we can consume destructively.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.indicator.Gauge
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.Gauge`"""
            )

        # Handle skip_invalid / _validate flags.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the data dict: an explicit keyword argument wins over a
        # value found in ``arg``; nothing is stored for properties left None.
        for prop_name, keyword_value in (
            ("axis", axis),
            ("bar", bar),
            ("bgcolor", bgcolor),
            ("bordercolor", bordercolor),
            ("borderwidth", borderwidth),
            ("shape", shape),
            ("steps", steps),
            ("stepdefaults", stepdefaults),
            ("threshold", threshold),
        ):
            value = arg.pop(prop_name, None)
            if keyword_value is not None:
                value = keyword_value
            if value is not None:
                self[prop_name] = value

        # Forward anything left over (plus extra kwargs) for validation.
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid so later assignments are validated normally.
        self._skip_invalid = False
| {
"content_hash": "6e815803d163fce5d60ca5d8cd3419be",
"timestamp": "",
"source": "github",
"line_count": 669,
"max_line_length": 82,
"avg_line_length": 39.37369207772795,
"alnum_prop": 0.5248092327550207,
"repo_name": "plotly/python-api",
"id": "7d5c7c6fb4aef9151aa8358341f9837f227b488c",
"size": "26341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/indicator/_gauge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from lxml import html
from lxml import etree #When done delete the unused import line
from lxml.etree import fromstring #i think i can delete this line
from py2neo import neo4j, Graph, Node, Relationship, rel
import requests #no need for this anymore
import sys
import os
import json
import time
#reload(sys)
#sys.setdefaultencoding('utf8')
#GLOBAL VARIABLES
# URL of the NASA page that lists the ISS experiments/hardware to scrape.
parentURL = 'http://www.nasa.gov/mission_pages/station/research/experiments/experiments_hardware.html'
# Pages that failed to load/parse (collected for later handling — exact
# use is not visible in this chunk).
pagesFailed = []
# Accumulators filled in while parsing; used later to store the scraped
# nodes and relationships (see parseTable / queueRelationship usage below).
experimentsDict = {}
categories = []
subCategory = []
agencies = []
relationships = []
expeditions = []
def parseTable(table):
    """Parse one section <table> from the experiments listing page.

    Records the section as a category, each sub-section as a sub-category,
    and scrapes every linked experiment page via parseExperiment().
    Appends into the module-level categories/subCategory/experimentsDict
    and queues the category/sub-category relationships.
    """
    global subCategory
    global categories
    global experimentsDict
    # These aren't really experiments and don't follow the page format,
    # so they break the parser; skip them outright.
    ExperimentsToIgnore = ['696',
                           '211',
                           '490',
                           '484',
                           '440',
                           '432',
                           '476',
                           '14',
                           '448',
                           '189',
                           '657',
                           '441',
                           '17',
                           '97',
                           '419',
                           '195',
                           '551',
                           '27',
                           '131',
                           '770',
                           '292',
                           '172',
                           '412',
                           '934',
                           #'1062',#REMOVE THIS
                           #'1973',#REMOVE THIS
                           '192']
    #This part of the function is for determining the section value
    tr = table[0]
    td = tr[0]
    temp = [x for x in td.itertext()] #this gets the text node
    section = temp[0]
    print ('*************************** Parsing New Section ******************************')
    print ('*Parsing section:\t' + section)
    categories.append({"title":section,"subs":[]})
    #Done determining the section
    # Iterate through all the sub sections.
    # We skip the first element in the list because it's an empty <td> used for formatting.
    for subSection in table.getchildren()[1:]:
        # Find the subsection title:
        # subSection = <tr>; temp = tr > td > span > text[]
        temp = [x for x in subSection[1][0].itertext()]
        subSectionTitle = temp[0]
        # Some sections have N/A and are blank. Could mean that information
        # will be added later, but for right now just skip them.
        if subSectionTitle == 'N/A': continue
        print ('*Parsing sub section:\t' + subSectionTitle)
        subCategory.append({"title":subSectionTitle,"Experiments":[]})
        categories[-1]["subs"].append(len(subCategory)-1)
        queueRelationship({"collection":"SubCategories","_id":len(subCategory)-1},{"collection":"Categories","_id":len(categories)-1},"In Category")
        # link = all the <a> elements in that subsection.
        # We skip the first element because it is a span and contains the title.
        for link in subSection[1].getchildren()[1:]:
            #Every other tag is a <br/> so lets skip those
            if link.tag != 'a': continue
            ExpID = getIndex(link.values()[1])
            if ExpID in ExperimentsToIgnore: continue
            subCategory[-1]["Experiments"].append(ExpID)
            experimentsDict[ExpID] = parseExperiment(link.values()[1])
            # Adds experiment to list of experiments and stores relationships
            # between categories and subcategories.
            queueRelationship({"collection":"Experiments","_id":ExpID},{"collection":"Categories","_id":len(categories)-1},"In Category")
            queueRelationship({"collection":"Experiments","_id":ExpID},{"collection":"SubCategories","_id":len(subCategory)-1},"In SubCategory")
#Returns experiment id from url ex. experiments/431.html returns 431
def getIndex(link):
    """Return the numeric experiment id from an experiment page URL.

    e.g. 'experiments/431.html' -> '431'.

    The id is simply the basename of the URL without its extension, which
    works for any id length. (The previous implementation sliced a fixed
    9-character window off the end and stripped known prefixes, which only
    handled ids of 1-4 digits.)
    """
    return os.path.splitext(os.path.basename(link))[0]
#takes link of experiment, loads it, parses it and creates relationships
def parseExperiment(link):
    """Load and parse one experiment page; return an experiment dict.

    The page is a flat stream of text nodes; this walks the stream and
    buckets text under the current main category / sub-category headers.
    Also queues agency and expedition relationships as a side effect.
    Returns None if the page fails to load.
    """
    ###########IMPORTANT VARIABLES
    global pagesFailed
    global agencies
    global categories
    global expeditions
    location = 'http://www.nasa.gov' + link
    # Manually copied and pasted the categories and sub categories
    # as they appear on the experiment pages.
    mainCategories = ['ISS Science for Everyone',
                      'Experiment Details',
                      'Experiment Description',
                      'Applications',
                      'Operations',
                      'Decadal Survey Recommendations',
                      'Results/More Information', #Exceptions from here down
                      'Results Publications',
                      'Ground Based Results Publications',
                      'ISS Patents',
                      'Related Publications',
                      'Related Websites',
                      'Imagery']
    subCategories = ['Principal Investigator(s)',
                     'Co-Investigator(s)/Collaborator(s)',
                     'Developer(s)',
                     'Sponsoring Space Agency',
                     'Sponsoring Organization',
                     'Research Benefits',
                     'ISS Expedition Duration',
                     'Expeditions Assigned',
                     'Previous ISS Missions',
                     'Science Objectives for Everyone',
                     'Science Results for Everyone',
                     'Previous Missions',
                     'Research Overview',
                     'Description',
                     'Space Applications',
                     'Earth Applications',
                     'Operational Requirements and Protocols',
                     'Operational Requirements',
                     'Operational Protocols']
    ###########LOAD WEBPAGE
    print ('>Loading link:\t' + location)
    document = html.parse(location)
    if document: print ('>Webpage Loaded')
    else:
        # Assumes page failed to load.
        # NOTE(review): html.parse() usually raises on failure rather than
        # returning a falsy value, so this branch likely never runs - confirm.
        print ('>Webpage Failed to locad: ' + location)
        pagesFailed.append(location)
        return
    document = document.getroot()
    ###########WEBPAGE LOADED
    #TEMP VARIABLES
    #root = div.pane-content > div
    root = document.find_class('pane-content')[2][2]
    experiment = {} #using a dictionary to store all the information
    text = [x for x in root.itertext()] #all text nodes
    counter = 0
    category = ''
    subCatHeader = ''
    temp = {}
    #IDENTIFY TITLE and DATE
    header = document.find_class('panel-pane pane-custom pane-2')[0][0]
    headerText = [x for x in header.itertext()]
    # Sets title and date appropriately, then checks if a date even exists
    # (the last 8 chars look like a date only when they split into 3 parts on '.').
    title = headerText[0].replace('"',"").replace('\"',"")
    date = headerText[0][-8:]
    if len(date.split('.')) == 3:
        title = headerText[0][:-11].replace('"',"").replace('\"',"")
    else:
        date = 'unknown'
    #####################
    experiment['title'] = title
    experiment['date'] = date
    experiment["_id"] = getIndex(link)
    ###########LOOP THROUGH ALL THE TEXT NODES
    # the first ~16 nodes are just useless links and formatting nodes
    for x in text[16:]:
        # Filter out the useless nodes that are from the poor web design:
        # strip newlines/tabs and double quotes (quotes would otherwise
        # break the hand-built JSON later).
        node = x
        node = node.replace("\n","")
        node = node.replace("\r","")
        node = node.replace("\t","")
        node = node.replace('"',"") #these two lines resolve the problem when creating JSON files
        node = node.replace("\"","")
        node = node.strip()
        test = node.replace(" ","")
        if test == "": continue
        #useless nodes
        if node == '' or node == '^ back to top' or "The following content was provided by" in node or 'OpNom:' in node:
            continue
        ###############################################################
        # A main-category header switches the current bucket.
        if node in mainCategories:
            category = node
            if node == 'Imagery': break
            experiment[category] = ''
            temp = {}
            subCatHeader = ''
        elif node in subCategories:
            #For some unknown reason this if statement is skipped the first time its is true
            subCatHeader = node
            temp[subCatHeader] = ''
        else:
            if subCatHeader == '' and category == mainCategories[0]:
                #This statement is used to fix the problem above about the if statement being skipped
                subCatHeader = node
                temp[subCatHeader] = ''
                continue
            if subCatHeader == '':
                # Plain text directly under a main category.
                if experiment[category] == '': experiment[category] = node
                else: experiment[category] = experiment[category] + '\n' + node
            else:
                # Text under a sub-category header; accumulate into temp and
                # re-point the category entry at the sub-category dict.
                if temp[subCatHeader] == '': temp[subCatHeader] = node
                else: temp[subCatHeader] = temp[subCatHeader] + '\n' + node
                experiment[category] = temp
            # Creates a list of agencies and defines relationships.
            if subCatHeader == 'Sponsoring Space Agency' and category == 'Experiment Details':
                if node not in agencies:
                    agencies.append(node)
                agencyIndex = agencies.index(node)
                queueRelationship({"collection":"Agencies","_id":agencyIndex},{"collection":"Experiments","_id":experiment["_id"]},"Performed")
                #queueRelationship({"collection":"Agencies","_id":agencyIndex},{"collection":"Categories","_id":len(categories)-1},"Studies") Agencies -> studies -> categories
                queueRelationship({"collection":"Agencies","_id":agencyIndex},{"collection":"SubCategories","_id":len(subCategory)-1},"Studies")#UNCOMMENT TO CREATE RELATIONSHIP BETWEEN SUBCATEGORIES AND AGENCIES
            if subCatHeader == 'Expeditions Assigned' and category == 'Experiment Details':
                #print(node)
                exped = node.split(',')
                for e in exped:
                    if e not in expeditions: expeditions.append(e)
                    expeditionIndex = expeditions.index(e)
                    queueRelationship({"collection":"Expeditions","_id":expeditionIndex},{"collection":"Experiments","_id":experiment["_id"]},"Assigned")
                continue
    return experiment
#This function is for testing purposes
def prettyPrintExperiment(experiment):
    """Dump an experiment dict to stdout in a readable layout (debug aid)."""
    for categoryName, details in experiment.items():
        print ('Category:\t' + categoryName)
        if isinstance(details, str):
            # Plain text stored directly under the category.
            print ('\t\t\t' + details)
        else:
            # Nested dict of sub-category -> text.
            for subName, body in details.items():
                print ('Sub Cat:\t\t' + subName)
                print (body)
def convertToJSON(data):
    """Serialize one experiment dict into a hand-built JSON object string.

    String values go in as '"cat":"text"'; nested dicts are flattened to
    '"cat - sub":"text"'. Fields that are empty or contain an HTML comment
    marker are skipped. Double quotes and a few stray characters are
    stripped so the output stays valid JSON.

    Fixes over the previous version:
    - str.replace() results were discarded (strings are immutable), so
      quotes were never actually removed; they are now assigned back.
    - an empty/fully-skipped dict previously produced '}' (the slice
      stripped the opening brace); it now produces '{}'.
    """
    temp = '{'
    for cat, info in data.items():
        if isinstance(info, str):
            info = info.replace('"', "")  # keep the hand-built JSON valid
            if '<!--' in info: continue
            if info != '': temp = temp + '"' + cat + '":"' + info + '",'
        else:
            for sub, text in info.items():
                text = text.replace('"', "")
                if '<!--' in text: continue
                if text != '': temp = temp + '"' + cat + ' - ' + sub + '":"' + text + '",'
    # Drop the trailing comma only if at least one field was written.
    if temp.endswith(','):
        temp = temp[:-1]
    temp = temp + '}'
    # Scrub stray backslash sequences and curly quotes that break mongoimport.
    temp = temp.replace("\o", "").replace("\ ", "").replace("”", "")
    return temp
#Formats to JSON format
def formatCat(data):
    """Render a list of {'title': ...} dicts as concatenated JSON objects.

    The list index becomes the _id. Output is a plain concatenation of
    objects (no separators), which is what mongoimport consumes.
    """
    chunks = []
    for idx, entry in enumerate(data):
        chunks.append('{"_id":"' + str(idx) + '","title":"' + entry['title'] + '"}')
    return ''.join(chunks)
#Formats to JSON format
def formatAgency(data):
    """Render a list of agency name strings as concatenated JSON objects.

    The list index becomes the _id, matching the indices queued in
    queueRelationship.
    """
    return ''.join(
        '{"_id":"' + str(idx) + '","title":"' + name + '"}'
        for idx, name in enumerate(data)
    )
#Formats to JSON format
def formatExpeditions():
    """Render the global expeditions list as concatenated JSON objects.

    Bullet characters (U+25CF) that leak in from the scraped markup are
    stripped from every title.
    """
    global expeditions
    parts = []
    for idx, title in enumerate(expeditions):
        cleaned = title.replace('\u25cf', '')
        parts.append('{"_id":"' + str(idx) + '","title":"' + cleaned + '"}')
    # Second pass kept for parity with the original (defensive scrub).
    return ''.join(parts).replace('\u25cf', '')
#Adds relationships to a list
def queueRelationship(start, end, name):
    """Queue a graph edge (start)-[name]->(end) for later creation.

    start/end are {"collection": ..., "_id": ...} node references.
    Edges are deduplicated: each exact (start, end, name) triple is stored
    at most once. (The previous special-case early return for 'Studies'
    edges was dead code - the generic membership check below already
    covered it - so it has been removed.)
    """
    global relationships
    temp = {"start": start, "end": end, "name": name}
    if temp not in relationships:
        relationships.append(temp)
#takes list of relationships and adds them to the database.
def createRelationships():
    """Replay every queued relationship into the local Neo4j instance.

    Looks up both endpoint nodes by their collection label and _id
    property, then creates the named relationship between them.
    """
    global relationships
    graph = Graph('http://localhost:7474/db/data')
    for entry in relationships:
        startRef = entry["start"]
        endRef = entry["end"]
        startNode = graph.find_one(startRef["collection"], property_key="_id", property_value=str(startRef["_id"]))
        endNode = graph.find_one(endRef["collection"], property_key="_id", property_value=str(endRef["_id"]))
        graph.create(rel(startNode, entry["name"], endNode))
##############################################
#
# This is where the functions stop and where the program begins to execute
#
##############################################
#######################################
#
# ***IMPORTANT***
#
# Change the directories below to correctly match your environment.
#
#######################################
_PROJECT_DIR = 'C:\\Users\\ajbuchan\\Desktop\\Project\\'
_MONGOIMPORT = '"C:\\program files\\mongodb\\server\\3.2\\bin\\mongoimport.exe"'

def _exportAndImport(fileName, jsonData, collection):
    """Write jsonData to fileName in the project dir and mongoimport it."""
    filePath = _PROJECT_DIR + fileName
    f = open(filePath, 'w+')
    f.write(jsonData)
    f.close()
    os.system(_MONGOIMPORT + ' --db ISSExperiments --collection ' + collection + ' --file ' + filePath)

# Gets the webpage and checks if it loaded properly.
document = html.parse(parentURL)
if document:
    print('Webpage Loaded')
else:
    # Assuming HTTP status != 200.
    print('Failed to load page')
    # BUG FIX: the bare 'sys.exit' reference was a no-op; it must be called.
    sys.exit()
# document represents the <html> element.
document = document.getroot()
# root is the <div class="pane-content"> child holding all the tables of
# experiment links, i.e. the parent node of everything we scrape.
root = document.find_class('pane-content')[2][3]
for child in root.getchildren():
    if child.tag == 'table':
        parseTable(child)
print("****************************************")
print("* Formatting and Importing Documents to MongoDB and Neo4j")
# Experiments need extra whitespace scrubbing and a binary write, so they
# are exported inline rather than through _exportAndImport.
# (join avoids the quadratic string concatenation of the old += loop)
jobj = ''.join(convertToJSON(exp) for exp in experimentsDict.values())
f = open(_PROJECT_DIR + 'experiment.json', 'wb')
f.write(jobj.replace("\r", "").replace("\n", "").replace("\t", "").encode())
f.close()
os.system(_MONGOIMPORT + ' --db ISSExperiments --collection Experiments --file ' + _PROJECT_DIR + 'experiment.json')
_exportAndImport('subCats.json', formatCat(subCategory), 'SubCategories')
_exportAndImport('cats.json', formatCat(categories), 'Categories')
_exportAndImport('agencies.json', formatAgency(agencies), 'Agencies')
_exportAndImport('expeditions.json', formatExpeditions(), 'Expeditions')
print("Done updating, waiting for mongo-connector")
time.sleep(400)
print("mongo-connector is either done or encountered a problem")
print("Generating Relationships")
createRelationships()
print("All Done")
| {
"content_hash": "9b28547ad0079ba20735b9029a4cf617",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 216,
"avg_line_length": 40.19799498746867,
"alnum_prop": 0.5698609639004926,
"repo_name": "davidmeza1/KA_Interns",
"id": "b1778bae29c963d56b00c6c08817843949a6e1b4",
"size": "16593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/Parser/Scraper3.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "472495"
},
{
"name": "HTML",
"bytes": "1857244"
},
{
"name": "Python",
"bytes": "6914"
},
{
"name": "R",
"bytes": "83467"
}
],
"symlink_target": ""
} |
"""Wide ResNet 40-4 architecture for CIFAR-100."""
import tensorflow as tf
from ._wrn import _wrn
from ..datasets.cifar100 import cifar100
from .testproblem import TestProblem
class cifar100_wrn404(TestProblem):
    """DeepOBS test problem: Wide ResNet 40-4 on CIFAR-100.

    Architecture details are in the `original paper`_. L2 weight decay is
    applied to the weights (but not the biases) and defaults to ``5e-4``.

    Training settings recommended in the `original paper`_:
    ``batch size = 128``, ``num_epochs = 200`` with Momentum
    (:math:`\\mu = 0.9`), initial learning rate ``0.1``, decayed by ``0.2``
    after ``60``, ``120`` and ``160`` epochs.

    .. _original paper: https://arxiv.org/abs/1605.07146

    Args:
        batch_size (int): Batch size to use.
        weight_decay (float): Weight decay (L2-regularization) factor,
            applied to weights but not biases. Defaults to ``5e-4``.

    Attributes:
        dataset: The DeepOBS data set class for Cifar-100.
        train_init_op: TF op initializing the problem for the training phase.
        train_eval_init_op: TF op initializing the problem for evaluation on
            training data.
        test_init_op: TF op initializing the problem for evaluation on test
            data.
        losses: tf.Tensor of shape (batch_size, ) with per-example losses.
        regularizer: Scalar tf.Tensor with the regularization term.
        accuracy: Scalar tf.Tensor with the mini-batch mean accuracy.
    """

    def __init__(self, batch_size, weight_decay=0.0005):
        """Create a new WRN 40-4 test problem instance on Cifar-100.

        Args:
            batch_size (int): Batch size to use.
            weight_decay (float): Weight decay (L2-regularization) factor,
                applied to weights but not biases. Defaults to ``5e-4``.
        """
        super(cifar100_wrn404, self).__init__(batch_size, weight_decay)

    def set_up(self):
        """Set up the Wide ResNet 40-4 test problem on Cifar-100."""
        # Data pipeline and the init ops DeepOBS expects.
        self.dataset = cifar100(self._batch_size)
        self.train_init_op = self.dataset.train_init_op
        self.train_eval_init_op = self.dataset.train_eval_init_op
        self.test_init_op = self.dataset.test_init_op

        is_training = tf.equal(self.dataset.phase, "train")
        images, labels = self.dataset.batch
        # WRN 40-4: 6 residual units per group, widening factor 4,
        # 100 output classes.
        logits = _wrn(
            images,
            is_training,
            num_residual_units=6,
            widening_factor=4,
            num_outputs=100,
            weight_decay=self._weight_decay)

        self.losses = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=labels, logits=logits)
        predicted_class = tf.argmax(logits, 1)
        true_class = tf.argmax(labels, 1)
        hits = tf.equal(predicted_class, true_class)
        self.accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))

        self.regularizer = tf.losses.get_regularization_loss()
| {
"content_hash": "149c156cf05f65705dae5ffab29440f4",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 85,
"avg_line_length": 39.1125,
"alnum_prop": 0.6538830297219559,
"repo_name": "fsschneider/DeepOBS",
"id": "f1d219360083e72691725a167adb737cf6cc6131",
"size": "3153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepobs/tensorflow/testproblems/cifar100_wrn404.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "368026"
},
{
"name": "Shell",
"bytes": "8516"
}
],
"symlink_target": ""
} |
kDefaultMMFontFileName = "mmfont.pfa"       # Type 1 MM font written next to the VFB
kInstancesDataFileName = "instances"        # per-instance data file read from the VFB's folder
kCompositeDataName = "temp.composite.dat"   # composite-glyph data consumed by makeInstances
###################################################
__copyright__ = """
Copyright 2014-2016 Adobe Systems Incorporated (http://www.adobe.com/). All Rights Reserved.
This software is licensed as OpenSource, under the Apache License, Version 2.0. This license is available at: http://opensource.org/licenses/Apache-2.0.
"""
__doc__ = """
Save Files for MakeInstances v2.0 - April 12 2016
This script will do part of the work to create a set of single-master fonts
("instances") from a Multiple Master (MM) FontLab font. It will save a
Type 1 MM font (needed by the makeInstances program) and, in some cases,
a text file named 'temp.composite.dat' that contains data related with
composite glyphs.
You must then run the makeInstances program to actually build the instance Type 1
fonts. makeInstances can remove working glyphs, and rename MM-exception glyphs.
It will also do overlap removal, and autohint the instance fonts. This last is
desirable, as autohinting which is specific to an instance font is usually
significantly better than the hinting from interpolating the MM font hints.
As always with overlap removal, you should check all affected glyphs - it
doesn't always do the right thing.
Note that the makeInstances program can be run alone, given an MM Type1 font
file. However, if you use the ExceptionSuffixes keyword, then you must run
this script first. The script will make a file that identifies composite glyphs,
and allows makeInstances to correctly substitute contours in the composite glyph
from the exception glyph. This is necessary because FontLab cannot write all the
composite glyphs as Type 1 composites (also known as SEAC glyphs). This script
must be run again to renew this data file whenever changes are made to composite
glyphs.
Both this script and the "makeInstances" program depend on info provided by an
external text file named "instances", which contains all the instance-specific
values. The "instances" file must be a simple text file, located in the same
folder as the MM FontLab file.
For information on how to format the "instances" file, please read the
documentation in the InstanceGenerator.py script.
==================================================
Versions:
v2.0 - Apr 12 2016 - Added step to fix the MM FontBBox values of the mmfont.pfa file,
when the VFB's UPM value is not 1000 (long-standing FontLab bug).
v1.0 - Feb 15 2010 - Initial release
"""
import copy
import re
import os
# --- Keys/field names used when parsing the 'instances' data file. ---
kFieldsKey = "#KEYS:"
kFamilyName = "FamilyName"
kFontName = "FontName"
kFullName = "FullName"
kWeight = "Weight"
kCoordsKey = "Coords"
kIsBoldKey = "IsBold" # This is changed to kForceBold in the instanceDict when reading in the instance file.
kForceBold = "ForceBold"
kIsItalicKey = "IsItalic"
kExceptionSuffixes = "ExceptionSuffixes"
kExtraGlyphs = "ExtraGlyphs"
# The first six columns of every data line are fixed and positional.
kFixedFieldKeys = {
        # field index: key name
        0:kFamilyName,
        1:kFontName,
        2:kFullName,
        3:kWeight,
        4:kCoordsKey,
        5:kIsBoldKey,
        }
kNumFixedFields = len(kFixedFieldKeys)
# --- Optional Type 1 hinting fields that may appear as extra columns. ---
kBlueScale = "BlueScale"
kBlueShift = "BlueShift"
kBlueFuzz = "BlueFuzz"
kBlueValues = "BlueValues"
kOtherBlues = "OtherBlues"
kFamilyBlues = "FamilyBlues"
kFamilyOtherBlues = "FamilyOtherBlues"
kStdHW = "StdHW"
kStdVW = "StdVW"
kStemSnapH = "StemSnapH"
kStemSnapV = "StemSnapV"
# Groupings and Type 1 spec limits used to validate the hinting arrays.
kAlignmentZonesKeys = [kBlueValues, kOtherBlues, kFamilyBlues, kFamilyOtherBlues]
kTopAlignZonesKeys = [kBlueValues, kFamilyBlues]
kMaxTopZonesSize = 14 # 7 zones
kBotAlignZonesKeys = [kOtherBlues, kFamilyOtherBlues]
kMaxBotZonesSize = 10 # 5 zones
kStdStemsKeys = [kStdHW, kStdVW]
kMaxStdStemsSize = 1
kStemSnapKeys = [kStemSnapH, kStemSnapV]
kMaxStemSnapSize = 12 # including StdStem
class ParseError(ValueError):
    """Raised when the 'instances' data file is malformed or empty."""
    pass
def validateArrayValues(arrayList, valuesMustBePositive):
    """Convert each string entry of arrayList to a number, in place.

    Returns the (mutated) list on success, or None if any entry fails to
    evaluate, or - when valuesMustBePositive is true - if any value is
    negative.
    """
    for idx in range(len(arrayList)):
        try:
            arrayList[idx] = eval(arrayList[idx])
        except (NameError, SyntaxError):
            # Not a numeric expression; signal failure.
            return
        if valuesMustBePositive and arrayList[idx] < 0:
            return
    return arrayList
def readInstanceFile(instancesFilePath):
    """Parse the tab-delimited 'instances' data file.

    Each data line describes one instance font; a '#KEYS:' header line may
    extend the six fixed columns with extra (mostly Type 1 hinting) fields.
    Returns a list of per-instance dicts, or raises ParseError if any line
    is invalid or no instances were found. (Python 2 code.)
    """
    f = open(instancesFilePath, "rt")
    data = f.read()
    f.close()
    lines = data.splitlines()
    i = 0
    parseError = 0
    keyDict = copy.copy(kFixedFieldKeys)
    numKeys = kNumFixedFields
    numLines = len(lines)
    instancesList = []
    for i in range(numLines):
        line = lines[i]
        # Skip over blank lines
        line2 = line.strip()
        if not line2:
            continue
        # Get rid of all comments. If we find a key definition comment line, parse it.
        commentIndex = line.find('#')
        if commentIndex >= 0:
            if line.startswith(kFieldsKey):
                if instancesList:
                    print "ERROR: Header line (%s) must preceed a data line." % kFieldsKey
                    raise ParseError
                # parse the line with the field names.
                line = line[len(kFieldsKey):]
                line = line.strip()
                keys = line.split('\t')
                keys = map(lambda name: name.strip(), keys)
                numKeys = len(keys)
                k = kNumFixedFields
                while k < numKeys:
                    keyDict[k] = keys[k]
                    k +=1
                continue
            else:
                # NOTE(review): this skips the whole line whenever it merely
                # contains a '#', so a data line with a trailing comment is
                # dropped entirely - confirm that is intended.
                line = line[:commentIndex]
                continue
        # Must be a data line.
        fields = line.split('\t')
        fields = map(lambda datum: datum.strip(), fields)
        numFields = len(fields)
        if (numFields != numKeys):
            print "ERROR: In line %s, the number of fields %s does not match the number of key names %s (FamilyName, FontName, FullName, Weight, Coords, IsBold)." % (i+1, numFields, numKeys)
            parseError = 1
            continue
        instanceDict= {}
        # Build a dict from key to value. Some kinds of values need special processing.
        for k in range(numFields):
            key = keyDict[k]
            field = fields[k]
            if not field:
                continue
            if field in ["Default", "None", "FontBBox"]:
                # FontBBox is no longer supported - I calculate the real
                # instance fontBBox from the glyph metrics instead,
                continue
            if key == kFontName:
                value = field
            elif key in [kExtraGlyphs, kExceptionSuffixes]:
                value = eval(field)
            elif key in [kIsBoldKey, kIsItalicKey, kCoordsKey]:
                try:
                    value = eval(field) # this works for all three fields.
                    if key == kIsBoldKey: # need to convert to Type 1 field key.
                        instanceDict[key] = value
                        # add kForceBold key.
                        key = kForceBold
                        if value == 1:
                            value = "true"
                        else:
                            value = "false"
                    elif key == kIsItalicKey:
                        if value == 1:
                            value = "true"
                        else:
                            value = "false"
                    elif key == kCoordsKey:
                        # A single number becomes a one-element tuple.
                        if type(value) == type(0):
                            value = (value,)
                except (NameError, SyntaxError):
                    print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
                    parseError = 1
                    continue
            elif field[0] in ["[","{"]: # it is a Type 1 array value. Turn it into a list and verify that there's an even number of values for the alignment zones
                value = field[1:-1].split() # Remove the begin and end brackets/braces, and make a list
                if key in kAlignmentZonesKeys:
                    if len(value) % 2 != 0:
                        print "ERROR: In line %s, the %s field does not have an even number of values." % (i+1, key)
                        parseError = 1
                        continue
                if key in kTopAlignZonesKeys: # The Type 1 spec only allows 7 top zones (7 pairs of values)
                    if len(value) > kMaxTopZonesSize:
                        print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxTopZonesSize)
                        parseError = 1
                        continue
                    else:
                        newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
                        if newArray:
                            value = newArray
                        else:
                            print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
                            parseError = 1
                            continue
                    currentArray = value[:] # make copy, not reference
                    value.sort()
                    if currentArray != value:
                        print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
                if key in kBotAlignZonesKeys: # The Type 1 spec only allows 5 top zones (5 pairs of values)
                    if len(value) > kMaxBotZonesSize:
                        print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxBotZonesSize)
                        parseError = 1
                        continue
                    else:
                        newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
                        if newArray:
                            value = newArray
                        else:
                            print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
                            parseError = 1
                            continue
                    currentArray = value[:] # make copy, not reference
                    value.sort()
                    if currentArray != value:
                        print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
                if key in kStdStemsKeys:
                    if len(value) > kMaxStdStemsSize:
                        print "ERROR: In line %s, the %s field can only have %d value." % (i+1, key, kMaxStdStemsSize)
                        parseError = 1
                        continue
                    else:
                        newArray = validateArrayValues(value, True) # True = all values must be positive
                        if newArray:
                            value = newArray
                        else:
                            print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
                            parseError = 1
                            continue
                if key in kStemSnapKeys: # The Type 1 spec only allows 12 stem widths, including 1 standard stem
                    if len(value) > kMaxStemSnapSize:
                        print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxStemSnapSize)
                        parseError = 1
                        continue
                    else:
                        newArray = validateArrayValues(value, True) # True = all values must be positive
                        if newArray:
                            value = newArray
                        else:
                            print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
                            parseError = 1
                            continue
                    currentArray = value[:] # make copy, not reference
                    value.sort()
                    if currentArray != value:
                        print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
            else:
                # either a single number or a string.
                if re.match(r"^[-.\d]+$", field):
                    value = field #it is a Type 1 number. Pass as is, as a string.
                else:
                    value = field
            instanceDict[key] = value
        # StdHW/StemSnapH and StdVW/StemSnapV must be provided as pairs,
        # and the first StemSnap value must equal the Std stem value.
        if (kStdHW in instanceDict and kStemSnapH not in instanceDict) or (kStdHW not in instanceDict and kStemSnapH in instanceDict):
            print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdHW, kStemSnapH)
            parseError = 1
        elif (kStdHW in instanceDict and kStemSnapH in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
            if instanceDict[kStemSnapH][0] != instanceDict[kStdHW][0]:
                print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapH, kStdHW)
                parseError = 1
        if (kStdVW in instanceDict and kStemSnapV not in instanceDict) or (kStdVW not in instanceDict and kStemSnapV in instanceDict):
            print "ERROR: In line %s, either the %s value or the %s values are missing or were invalid." % (i+1, kStdVW, kStemSnapV)
            parseError = 1
        elif (kStdVW in instanceDict and kStemSnapV in instanceDict): # cannot be just 'else' because it will generate a 'KeyError' when these hinting parameters are not provided in the 'instances' file
            if instanceDict[kStemSnapV][0] != instanceDict[kStdVW][0]:
                print "ERROR: In line %s, the first value in %s must be the same as the %s value." % (i+1, kStemSnapV, kStdVW)
                parseError = 1
        instancesList.append(instanceDict)
    if parseError or len(instancesList) == 0:
        raise(ParseError)
    return instancesList
def saveCompositeInfo(fontMM, mmParentDir):
    """Write composite-glyph data for makeInstances to mmParentDir.

    For every glyph with components, records [componentName, pathIndex,
    per-master (shift, scale) metrics or None] and saves the whole dict as
    repr() text to the temp.composite.dat file. (FontLab API; Python 2.)
    """
    filePath = os.path.join(mmParentDir, kCompositeDataName)
    numGlyphs = len(fontMM)
    glyphDict = {}
    numMasters = fontMM.glyphs[0].layers_number
    for gid in range(numGlyphs):
        glyph = fontMM.glyphs[gid]
        lenComps = len(glyph.components)
        if lenComps == 0:
            continue
        compList = []
        glyphDict[glyph.name] = compList
        numBaseContours = glyph.GetContoursNumber()
        # Component contours are appended after the base contours.
        pathIndex = numBaseContours
        for cpi in range(lenComps):
            component = glyph.components[cpi]
            compGlyph = fontMM.glyphs[component.index]
            # NOTE(review): the trailing comma makes compName a 1-tuple, and
            # compEntry is never used afterwards (compName is reassigned
            # below) - this looks like dead leftover code.
            compName = compGlyph.name,
            compEntry = [compName, numBaseContours + cpi]
            metricsList = [None]*numMasters
            seenAnyChange = 0
            for mi in range(numMasters):
                shift = component.deltas[mi]
                scale = component.scales[mi]
                shiftEntry = scaleEntry = None
                # Only record metrics that differ from the identity transform.
                if (shift.x != 0) or (shift.y != 0):
                    shiftEntry = (shift.x, shift.y)
                if (scale.x != 1.0) or (scale.y !=1.0 ):
                    scaleEntry = (scale.x, scale.y)
                if scaleEntry or shiftEntry:
                    metricsEntry = (shiftEntry, scaleEntry)
                    seenAnyChange = 1
                else:
                    metricsEntry = None
                metricsList[mi] = metricsEntry
            compName = fontMM.glyphs[component.index].name
            if seenAnyChange:
                compList.append([compName, pathIndex, metricsList])
            else:
                compList.append([compName, pathIndex, None])
            pathIndex += compGlyph.GetContoursNumber()
    fp = open(filePath, "wt")
    fp.write(repr(glyphDict))
    fp.close()
def parseVals(valList):
    """Split a whitespace-separated numeric string into a list of numbers."""
    return [eval(token) for token in valList.split()]
def fixFontBBox(data, pfaPath):
    """Recompute the MM FontBBox from the open FontLab font and patch it
    into the mmfont.pfa text (works around a FontLab bug when UPM != 1000).

    data is the pfa file contents; the file at pfaPath is rewritten only
    when the computed bbox differs. (Python 2; uses the FontLab 'fl' global.)
    """
    bboxMatch = re.search(r"/FontBBox\s*\{\{([^}]+)\}\s*\{([^}]+)\}\s*\{([^}]+)\}\s*\{([^}]+)\}\}", data)
    if not bboxMatch:
        print "Failed to find MM FontBBox %s" % pfaPath
        return
    pfaBBox = [bboxMatch.group(1), bboxMatch.group(2), bboxMatch.group(3), bboxMatch.group(4)]
    pfaBBox = map(parseVals, pfaBBox)
    print "Calculating correct MM FontBBox..."
    mastersRange = range(fl.font.glyphs[0].layers_number)
    # flBBox[i][m] collects per-glyph values for bbox component i, master m.
    flBBox = [[], [], [], []]
    for i in range(4):
        for m in mastersRange:
            flBBox[i].append([])
    c = 0
    for flGlyph in fl.font.glyphs:
        for m in mastersRange:
            bbox = flGlyph.GetBoundingRect(m)
            flBBox[0][m].append(bbox.ll.x)
            flBBox[1][m].append(bbox.ll.y)
            flBBox[2][m].append(bbox.ur.x)
            flBBox[3][m].append(bbox.ur.y)
    # Reduce the per-glyph lists to the font-wide min/max per master.
    for m in mastersRange:
        flBBox[0][m] = int( round( min(flBBox[0][m])) )
        flBBox[1][m] = int( round( min(flBBox[1][m])) )
        flBBox[2][m] = int( round( max(flBBox[2][m])) )
        flBBox[3][m] = int( round( max(flBBox[3][m])) )
    if pfaBBox == flBBox:
        print "mmfont.pfa and fl.font have the same MM FontBBox values."
    else:
        # Rebuild the FontBBox text and splice it back into the file data.
        matchGroups = bboxMatch.groups()
        numGroups = 4 # by definition of regex above.
        prefix = data[:bboxMatch.start(1)-1]
        postfix = data[bboxMatch.end(4)+1:]
        newString = []
        for i in range(numGroups):
            newString.append("{")
            for m in mastersRange:
                # NOTE(review): this bare 'newString.append' is a no-op
                # attribute reference (the method is never called) - it looks
                # like a leftover and should probably be removed or given an
                # argument.
                newString.append
                newString.append("%s" % (flBBox[i][m]) )
            newString.append("}")
        newString = " ".join(newString)
        data = prefix + newString + postfix
        try:
            fp = open(pfaPath, "wt")
            fp.write(data)
            fp.close()
            print "Updated mmfont.pfa with correct MM FontBBox values."
        except (OSError,IOError):
            print "Failed to open and write %s" % pfaPath
def saveFiles():
    """Main work routine: generate mmfont.pfa (plus composite data when
    needed) next to the open FontLab MM font.

    Reads and validates the 'instances' file, sets the FontLab Type 1
    export preferences, writes the MM pfa, optionally saves the composite
    glyph data, and fixes the FontBBox when the UPM is not 1000.
    (Python 2; uses the FontLab 'fl' and 'Options' globals.)
    """
    try:
        parentDir = os.path.dirname(os.path.abspath(fl.font.file_name))
    except AttributeError:
        # file_name is unset until the VFB has been saved once.
        print "The font has not been saved. Please save the font and try again."
        return
    instancesFilePath = os.path.join(parentDir, kInstancesDataFileName)
    if not os.path.isfile(instancesFilePath):
        print "Could not find the file named '%s' in the path below\n\t%s" % (kInstancesDataFileName, parentDir)
        return
    try:
        print "Parsing instances file..."
        instancesList = readInstanceFile(instancesFilePath)
    except ParseError:
        print "Error parsing file or file is empty."
        return
    # Set FontLab preferences
    flPrefs = Options()
    flPrefs.Load()
    flPrefs.T1Terminal = 0 # so we don't have to close the dialog with each instance.
    flPrefs.T1Encoding = 1 # always write Std Encoding.
    flPrefs.T1Decompose = 1 # Do decompose SEAC chars
    flPrefs.T1Autohint = 0 # Do not autohint unhinted chars
    # Generate mmfont.pfa
    pfaPath = os.path.join(parentDir, kDefaultMMFontFileName)
    print "Saving Type 1 MM font file to:%s\t%s" % (os.linesep, pfaPath)
    fl.GenerateFont(eval("ftTYPE1ASCII_MM"), pfaPath)
    # Check if mmfont.pfa was indeed generated
    if not os.path.exists(pfaPath):
        print "Failed to find %s" % pfaPath
        return
    # Save the composite glyph data, but only if it's necessary
    if (kExceptionSuffixes in instancesList[0] or kExtraGlyphs in instancesList[0]):
        compositePath = os.path.join(parentDir, kCompositeDataName)
        print "Saving composite glyphs data to:%s\t%s" % (os.linesep, compositePath)
        saveCompositeInfo(fl.font, parentDir)
    # Fix the FontBBox values if the font's UPM is not 1000
    if fl.font.upm != 1000:
        try:
            fp = open(pfaPath, "rt")
            data = fp.read()
            fp.close()
        except (OSError,IOError):
            print "Failed to open and read %s" % pfaPath
            return
        fixFontBBox(data, pfaPath)
    print "Done!"
def run():
global debug
if fl.count == 0:
print 'No font opened.'
return
if len(fl.font) == 0:
print 'The font has no glyphs.'
return
if fl.font[0].layers_number == 1:
print 'The font is not MM.'
return
else:
fl.output = ''
saveFiles()
# Run only when executed as a script (e.g. from FontLab's macro panel).
if __name__ == "__main__":
    run()
| {
"content_hash": "13f2c7a98f5a25308e6723f7301c0757",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 196,
"avg_line_length": 33.4156862745098,
"alnum_prop": 0.684661424715409,
"repo_name": "adobe-type-tools/fontlab-scripts",
"id": "3b241aa3ddbeb081891883e568d8abcc51dc87e0",
"size": "17235",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "MM Designs/SaveFilesForMakeInstances.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "288366"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import (
AliTemplateView,
AliVideoconferenciasDetailView,
AreaDetailView,
AulaDetailView,
CuerpoDetailView,
IndexView,
LaboratorioDetailView,
LaboratorioInformaticoDetailView,
LaboratorioInformaticoListView,
NivelDetailView,
recurso_eventos_json,
RecursoAliDetailView,
SolicitudAliReclamosSugerencias,
SolicitudAulaView,
SolicitudInstalacionSoftwareView,
SolicitudLaboratorioInformaticoView,
SolicitudMaterialMultimediaView,
TipoLaboratorioDetailView,
TipoRecursoAliDetailView,
TvCuerposListView,
TvVisorCuerposDetailView,
TvVisorDetailView,
)
urlpatterns = [
    # Site index.
    url(
        r'^$',
        IndexView.as_view(),
        name='index'
    ),
    # Building ("cuerpo"), level, classroom and area detail pages.
    url(
        r'^cuerpo/(?P<numero>\d+)/$',
        CuerpoDetailView.as_view(),
        name='cuerpo_detalle'
    ),
    url(
        r'^cuerpo/(?P<numero_cuerpo>\d+)/nivel/(?P<numero_nivel>-?\d+)/$',
        NivelDetailView.as_view(),
        name='nivel_detalle'
    ),
    url(
        r'^aula/(?P<pk>\d+)/$',
        AulaDetailView.as_view(),
        name='aula_detalle'
    ),
    url(
        r'^area/(?P<slug>[-\w]+)/$',
        AreaDetailView.as_view(),
        name='area_detalle'
    ),
    # JSON view with the events of a single resource.
    url(
        r'^recurso/(?P<pk>\d+)/eventos/$',
        recurso_eventos_json,
        name='recurso_eventos_json'
    ),
    # ALI section. Note the ordering: the literal routes here must come
    # before the slug-based ALI catch-alls further down, since Django
    # uses the first matching pattern.
    url(
        r'^ali/$',
        AliTemplateView.as_view(),
        name='ali_index'
    ),
    url(
        r'^ali/videoconferencias/$',
        AliVideoconferenciasDetailView.as_view(),
        name='ali_videoconferencias_detalle'
    ),
    # Laboratories: computer labs first (literal routes), then the
    # generic per-type/per-alias routes.
    url(
        r'^laboratorios/informatica/$',
        LaboratorioInformaticoListView.as_view(),
        name='laboratorio_informatico_listado'
    ),
    url(
        r'^laboratorio/informatica/(?P<alias>[A-Za-z0-9]+)/$',
        LaboratorioInformaticoDetailView.as_view(),
        name='laboratorio_informatico_detalle'
    ),
    url(
        r'^laboratorios/(?P<slug>[-\w]+)/$',
        TipoLaboratorioDetailView.as_view(),
        name='tipo_laboratorio_detalle'
    ),
    url(
        r'^laboratorio/(?P<tipo>[A-Za-z0-9]+)/(?P<alias>[A-Za-z0-9]+)/$',
        LaboratorioDetailView.as_view(),
        name='laboratorio_detalle'
    ),
    # Slug-based ALI catch-alls (must follow the literal ALI routes above).
    url(
        r'^ali/(?P<slug>[-\w]+)/$',
        TipoRecursoAliDetailView.as_view(),
        name='tipo_recurso_ali_detalle'
    ),
    url(
        r'^ali/(?P<tipo>[-\w]+)/(?P<identificador>[A-Za-z0-9_-]+)/$',
        RecursoAliDetailView.as_view(),
        name='recurso_ali_detalle'
    ),
    # Request/complaint forms.
    url(
        r'^solicitud/ali/reclamos_sugerencias/$',
        SolicitudAliReclamosSugerencias.as_view(),
        name='solicitud_ali_reclamos_sugerencias'
    ),
    url(
        r'^solicitud/aula/$',
        SolicitudAulaView.as_view(),
        name='solicitud_aula'
    ),
    url(
        r'^solicitud/instalacion_software/$',
        SolicitudInstalacionSoftwareView.as_view(),
        name='solicitud_instalacion_software'
    ),
    url(
        r'^solicitud/laboratorio/informatica/$',
        SolicitudLaboratorioInformaticoView.as_view(),
        name='solicitud_laboratorio_informatico'
    ),
    url(
        r'^solicitud/material_multimedia/$',
        SolicitudMaterialMultimediaView.as_view(),
        name='solicitud_material_multimedia'
    ),
    # TV display views.
    url(
        r'^tv/cuerpos/$',
        TvCuerposListView.as_view(),
        name='tv_cuerpos'
    ),
    url(
        r'^tv/visor/(?P<slug>[-\w]+)/$',
        TvVisorDetailView.as_view(),
        name='tv_visor'
    ),
    url(
        r'^tv/visor/(?P<slug>[-\w]+)/cuerpos/$',
        TvVisorCuerposDetailView.as_view(),
        name='tv_visor_cuerpos'
    ),
    # TODO: Remove. These views are obsolete due to the VisorTv views. They
    # are only kept for compatibility with the viewers currently in use.
    url(
        r'^tv/bedelia/(?P<slug>[-\w]+)/$',
        TvVisorDetailView.as_view(),
        name='tv_bedelia'
    ),
    url(
        r'^tv/bedelia/(?P<slug>[-\w]+)/cuerpos/$',
        TvVisorCuerposDetailView.as_view(),
        name='tv_bedelia_cuerpos'
    ),
]
| {
"content_hash": "5fa5abbff6923889f00f1187052f8d1e",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 78,
"avg_line_length": 27.07843137254902,
"alnum_prop": 0.5821868211440985,
"repo_name": "utn-frm-si/reservas",
"id": "0149c118d8e98a1f596642f67ed4c3512f0f8b0e",
"size": "4160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_reservas/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3994"
},
{
"name": "HTML",
"bytes": "92217"
},
{
"name": "Python",
"bytes": "134634"
},
{
"name": "Shell",
"bytes": "865"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from collections import deque
from djblets.util.compat.django.template.loader import render_to_string
from reviewboard.reviews.errors import DepthLimitExceededError
#: The maximum depth limit of any action instance.
#:
#: Top-level actions have a depth of 0 and children one more than their
#: parent (see :py:meth:`BaseReviewRequestAction.max_depth`).
MAX_DEPTH_LIMIT = 2
#: The mapping of all action IDs to their corresponding action instances.
#: This includes nested (child) actions, not just top-level ones.
_all_actions = {}
#: All top-level action IDs (in their left-to-right order of appearance).
_top_level_ids = deque()
#: Determines if the default action instances have been populated yet.
_populated = False
class BaseReviewRequestAction(object):
    """A base class for an action that can be applied to a review request.
    Creating an action requires subclassing :py:class:`BaseReviewRequestAction`
    and overriding any fields/methods as desired. Different instances of the
    same subclass can also override the class fields with their own instance
    fields.
    Example:
        .. code-block:: python
            class UsedOnceAction(BaseReviewRequestAction):
                action_id = 'once'
                label = 'This is used once.'
            class UsedMultipleAction(BaseReviewRequestAction):
                def __init__(self, action_id, label):
                    super(UsedMultipleAction, self).__init__()
                    self.action_id = 'repeat-' + action_id
                    self.label = 'This is used multiple times,'
    Note:
        Since the same action will be rendered for multiple different users in
        a multithreaded environment, the action state should not be modified
        after initialization. If we want different action attributes at
        runtime, then we can override one of the getter methods (such as
        :py:meth:`get_label`), which by default will simply return the original
        attribute from initialization.
    """
    #: The ID of this action. Must be unique across all types of actions and
    #: menu actions, at any depth.
    action_id = None
    #: The label that displays this action to the user.
    label = None
    #: The URL to invoke if this action is clicked.
    url = '#'
    #: Determines if this action should be initially hidden to the user.
    hidden = False
    def __init__(self):
        """Initialize this action.
        By default, actions are top-level and have no children.
        """
        # Set by register() when this action is registered under a menu
        # action; None means this action is top-level.
        self._parent = None
        # Cached height of the subtree rooted at this action. A value of 0
        # also doubles as "not yet computed" for menu actions (see
        # BaseReviewRequestMenuAction.max_depth).
        self._max_depth = 0
    def copy_to_dict(self, context):
        """Copy this action instance to a dictionary.
        Args:
            context (django.template.Context):
                The collection of key-value pairs from the template.
        Returns:
            dict: The corresponding dictionary.
        """
        return {
            'action_id': self.action_id,
            'label': self.get_label(context),
            'url': self.get_url(context),
            'hidden': self.get_hidden(context),
        }
    def get_label(self, context):
        """Return this action's label.
        Args:
            context (django.template.Context):
                The collection of key-value pairs from the template.
        Returns:
            unicode: The label that displays this action to the user.
        """
        return self.label
    def get_url(self, context):
        """Return this action's URL.
        Args:
            context (django.template.Context):
                The collection of key-value pairs from the template.
        Returns:
            unicode: The URL to invoke if this action is clicked.
        """
        return self.url
    def get_hidden(self, context):
        """Return whether this action should be initially hidden to the user.
        Args:
            context (django.template.Context):
                The collection of key-value pairs from the template.
        Returns:
            bool: Whether this action should be initially hidden to the user.
        """
        return self.hidden
    def should_render(self, context):
        """Return whether or not this action should render.
        The default implementation is to always render the action everywhere.
        Args:
            context (django.template.Context):
                The collection of key-value pairs available in the template
                just before this action is to be rendered.
        Returns:
            bool: Determines if this action should render.
        """
        return True
    @property
    def max_depth(self):
        """Lazily compute the max depth of any action contained by this action.
        Top-level actions have a depth of zero, and child actions have a depth
        that is one more than their parent action's depth.
        Algorithmically, the notion of max depth is equivalent to the notion of
        height in the context of trees (from graph theory). We decided to use
        this term instead so as not to confuse it with the dimensional height
        of a UI element.
        Returns:
            int: The max depth of any action contained by this action.
        """
        # A plain (leaf) action always has height 0; menu actions override
        # this property to compute the height of their subtree.
        return self._max_depth
    def reset_max_depth(self):
        """Reset the max_depth of this action and all its ancestors to zero."""
        # Zeroing the cache forces a lazy recomputation on next access; the
        # reset must propagate to ancestors since their heights depend on
        # this subtree.
        self._max_depth = 0
        if self._parent:
            self._parent.reset_max_depth()
    def render(self, context, action_key='action',
               template_name='reviews/action.html'):
        """Render this action instance and return the content as HTML.
        Args:
            context (django.template.Context):
                The collection of key-value pairs that is passed to the
                template in order to render this action.
            action_key (unicode, optional):
                The key to be used for this action in the context map.
            template_name (unicode, optional):
                The name of the template to be used for rendering this action.
        Returns:
            unicode: The action rendered in HTML.
        """
        content = ''
        if self.should_render(context):
            # Push a fresh scope so the action variable doesn't leak into
            # (or clobber an entry in) the caller's template context.
            context.push()
            try:
                context[action_key] = self.copy_to_dict(context)
                content = render_to_string(template_name, context)
            finally:
                context.pop()
        return content
    def register(self, parent=None):
        """Register this review request action instance.
        Note:
            Newly registered top-level actions are appended to the left of the
            other previously registered top-level actions. So if we intend to
            register a collection of top-level actions in a certain order, then
            we likely want to iterate through the actions in reverse.
        Args:
            parent (BaseReviewRequestMenuAction, optional):
                The parent action instance of this action instance.
        Raises:
            KeyError:
                A second registration is attempted (action IDs must be unique
                across all types of actions and menu actions, at any depth).
            DepthLimitExceededError:
                The maximum depth limit is exceeded.
        """
        # Make sure the built-in actions are registered first, so ID
        # collisions with them are detected below.
        _populate_defaults()
        if self.action_id in _all_actions:
            raise KeyError('%s already corresponds to a registered review '
                           'request action' % self.action_id)
        # NOTE(review): this checks only the height of this action's own
        # subtree, not the combined depth it would have under the new
        # parent -- confirm that's intended.
        if self.max_depth > MAX_DEPTH_LIMIT:
            raise DepthLimitExceededError(self.action_id, MAX_DEPTH_LIMIT)
        if parent:
            parent.child_actions.append(self)
            self._parent = parent
        else:
            # Top-level actions are prepended (see the Note above).
            _top_level_ids.appendleft(self.action_id)
        _all_actions[self.action_id] = self
    def unregister(self):
        """Unregister this review request action instance.
        Note:
            This method can mutate its parent's child actions. So if we are
            iteratively unregistering a parent's child actions, then we should
            consider first making a clone of the list of children.
        Raises:
            KeyError: An unregistration is attempted before it's registered.
        """
        _populate_defaults()
        try:
            del _all_actions[self.action_id]
        except KeyError:
            raise KeyError('%s does not correspond to a registered review '
                           'request action' % self.action_id)
        if self._parent:
            self._parent.child_actions.remove(self)
        else:
            _top_level_ids.remove(self.action_id)
        # The cached heights up the ancestor chain are now stale; force
        # them to be recomputed lazily.
        self.reset_max_depth()
class BaseReviewRequestMenuAction(BaseReviewRequestAction):
    """A base class for an action with a dropdown menu.
    Note:
        A menu action's child actions must always be pre-registered.
    """
    def __init__(self, child_actions=None):
        """Initialize this menu action.
        Args:
            child_actions (list of BaseReviewRequestAction, optional):
                The list of child actions to be contained by this menu action.
        Raises:
            KeyError:
                A second registration is attempted (action IDs must be unique
                across all types of actions and menu actions, at any depth).
            DepthLimitExceededError:
                The maximum depth limit is exceeded.
        """
        super(BaseReviewRequestMenuAction, self).__init__()
        self.child_actions = []
        child_actions = child_actions or []
        # Registering each child appends it to self.child_actions, and may
        # raise if an ID is already taken or the depth limit is exceeded.
        for child_action in child_actions:
            child_action.register(self)
    def copy_to_dict(self, context):
        """Copy this menu action instance to a dictionary.
        Args:
            context (django.template.Context):
                The collection of key-value pairs from the template.
        Returns:
            dict: The corresponding dictionary.
        """
        dict_copy = {
            'child_actions': self.child_actions,
        }
        dict_copy.update(super(BaseReviewRequestMenuAction, self).copy_to_dict(
            context))
        return dict_copy
    @property
    def max_depth(self):
        """Lazily compute the max depth of any action contained by this action.
        Returns:
            int: The max depth of any action contained by this action.
        """
        # _max_depth == 0 doubles as the "not yet computed" sentinel (it is
        # zeroed by reset_max_depth()), so the height is recomputed here on
        # demand from the children's heights.
        if self.child_actions and self._max_depth == 0:
            self._max_depth = 1 + max(child_action.max_depth
                                      for child_action in self.child_actions)
        return self._max_depth
    def render(self, context, action_key='menu_action',
               template_name='reviews/menu_action.html'):
        """Render this menu action instance and return the content as HTML.
        Args:
            context (django.template.Context):
                The collection of key-value pairs that is passed to the
                template in order to render this menu action.
            action_key (unicode, optional):
                The key to be used for this menu action in the context map.
            template_name (unicode, optional):
                The name of the template to be used for rendering this menu
                action.
        Returns:
            unicode: The action rendered in HTML.
        """
        # Same rendering logic as the base class, just with menu-specific
        # defaults for the context key and template.
        return super(BaseReviewRequestMenuAction, self).render(
            context, action_key, template_name)
    def unregister(self):
        """Unregister this review request action instance.
        This menu action recursively unregisters its child action instances.
        Raises:
            KeyError: An unregistration is attempted before it's registered.
        """
        super(BaseReviewRequestMenuAction, self).unregister()
        # Unregistration will mutate self.child_actions, so we make a copy.
        for child_action in list(self.child_actions):
            child_action.unregister()
# TODO: Convert all this to use djblets.registries.
def _populate_defaults():
    """Register the built-in default action instances, at most once."""
    global _populated

    if _populated:
        return

    _populated = True

    # Deferred import; presumably avoids a circular import at module load
    # time -- confirm before moving it to the top of the file.
    from reviewboard.reviews.default_actions import get_default_actions

    # register() prepends top-level actions, so iterate in reverse to keep
    # the intended left-to-right order.
    for default_action in reversed(get_default_actions()):
        default_action.register()
def get_top_level_actions():
    """Return a generator of all top-level registered action instances.
    Yields:
        BaseReviewRequestAction:
        All top-level registered review request action instances.
    """
    _populate_defaults()

    # This intentionally returns a generator expression instead of using
    # yield, so that _populate_defaults() runs eagerly at call time rather
    # than on first iteration.
    return (_all_actions[top_level_id] for top_level_id in _top_level_ids)
def register_actions(actions, parent_id=None):
    """Register the given actions as children of the corresponding parent.
    If no parent_id is given, then the actions are assumed to be top-level.
    Args:
        actions (iterable of BaseReviewRequestAction):
            The collection of action instances to be registered.
        parent_id (unicode, optional):
            The action ID of the parent of each action instance to be
            registered.
    Raises:
        KeyError:
            The parent action cannot be found or a second registration is
            attempted (action IDs must be unique across all types of actions
            and menu actions, at any depth).
        DepthLimitExceededError:
            The maximum depth limit is exceeded.
    """
    _populate_defaults()

    parent = None

    if parent_id is not None:
        try:
            parent = _all_actions[parent_id]
        except KeyError:
            raise KeyError('%s does not correspond to a registered review '
                           'request action' % parent_id)

    # register() prepends top-level actions, hence the reversed iteration.
    for action in reversed(actions):
        action.register(parent)

    if parent:
        # The parent's cached height is stale now that it has new children.
        parent.reset_max_depth()
def unregister_actions(action_ids):
    """Unregister each of the actions corresponding to the given IDs.
    Args:
        action_ids (iterable of unicode):
            The collection of action IDs corresponding to the actions to be
            removed.
    Raises:
        KeyError: An unregistration is attempted before it's registered.
    """
    _populate_defaults()

    for action_id in action_ids:
        if action_id not in _all_actions:
            raise KeyError('%s does not correspond to a registered review '
                           'request action' % action_id)

        _all_actions[action_id].unregister()
def clear_all_actions():
    """Clear all registered actions.
    This method is really only intended to be used by unit tests. We might be
    able to remove this hack once we convert to djblets.registries.
    Warning:
        This will clear **all** actions, even if they were registered in
        separate extensions.
    """
    global _populated

    _populated = False
    _all_actions.clear()
    _top_level_ids.clear()
| {
"content_hash": "42fcbb6150d2680d372523cf9e4364d4",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 79,
"avg_line_length": 31.94396551724138,
"alnum_prop": 0.614154635002024,
"repo_name": "chipx86/reviewboard",
"id": "0b7687fa9e461d63bd78fefdf5f21ca23351c793",
"size": "14822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/reviews/actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "434719"
},
{
"name": "HTML",
"bytes": "224310"
},
{
"name": "JavaScript",
"bytes": "3830753"
},
{
"name": "Python",
"bytes": "7333453"
},
{
"name": "Shell",
"bytes": "777"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import re
import warnings
from contextlib import contextmanager
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.files import File
from django.utils import six, timezone
from djblets.testing.testcases import (FixturesCompilerMixin,
TestCase as DjbletsTestCase)
from oauthlib.common import generate_token
from oauth2_provider.models import AccessToken
from reviewboard import scmtools, initialize
from reviewboard.accounts.models import ReviewRequestVisit
from reviewboard.attachments.models import FileAttachment
from reviewboard.diffviewer.differ import DiffCompatVersion
from reviewboard.diffviewer.models import DiffSet, DiffSetHistory, FileDiff
from reviewboard.notifications.models import WebHookTarget
from reviewboard.oauth.models import Application
from reviewboard.reviews.models import (Comment,
FileAttachmentComment,
GeneralComment,
Group,
Review,
ReviewRequest,
ReviewRequestDraft,
Screenshot,
ScreenshotComment,
StatusUpdate)
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.site.models import LocalSite
from reviewboard.webapi.models import WebAPIToken
class TestCase(FixturesCompilerMixin, DjbletsTestCase):
"""The base class for Review Board test cases.
This class provides a number of convenient functions for creating
common objects for testing, such as review requests and comments. They're
populated with default data that can be overridden by the callers.
This also overcomes an annoyance with default Django unit tests where
the cache is not cleared across tests, leading to inconsistent results
and useless testing.
"""
local_site_name = 'local-site-1'
local_site_id = 1
ws_re = re.compile(r'\s+')
DEFAULT_FILEDIFF_DATA = (
b'--- README\trevision 123\n'
b'+++ README\trevision 123\n'
b'@@ -1 +1 @@\n'
b'-Hello, world!\n'
b'+Hello, everybody!\n'
)
DEFAULT_GIT_FILEDIFF_DATA = (
b'diff --git a/README b/README\n'
b'index 94bdd3e..197009f 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -2 +2 @@\n'
b'-blah blah\n'
b'+blah!\n'
)
    def setUp(self):
        """Set up state for a test.

        Initializes Review Board, resets the per-test LocalSite cache, and
        clears the Django cache so earlier tests can't leak state in.
        """
        super(TestCase, self).setUp()
        initialize()
        self._local_sites = {}
        # Clear the cache so that previous tests don't impact this one.
        cache.clear()
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_local_site_or_none(self, name):
"""Returns a LocalSite matching the name, if provided, or None."""
if name:
return self.get_local_site(name=name)
else:
return None
def get_local_site(self, name):
if name not in self._local_sites:
self._local_sites[name] = LocalSite.objects.get(name=name)
return self._local_sites[name]
def create_webapi_token(self, user, note='Sample note',
policy={'access': 'rw'},
with_local_site=False,
**kwargs):
"""Creates a WebAPIToken for testing."""
if with_local_site:
local_site = self.get_local_site(name=self.local_site_name)
else:
local_site = None
return WebAPIToken.objects.generate_token(user=user,
note=note,
policy=policy,
local_site=local_site)
@contextmanager
def assert_warns(self, cls=DeprecationWarning, message=None):
"""A context manager for asserting code generates a warning.
This method only supports code which generates a single warning.
Tests which make use of code generating multiple warnings will
need to manually catch their warnings.
"""
with warnings.catch_warnings(record=True) as w:
# Some warnings such as DeprecationWarning are filtered by
# default, stop filtering them.
warnings.simplefilter("always")
self.assertEqual(len(w), 0)
yield
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, cls))
if message is not None:
self.assertEqual(message, six.text_type(w[-1].message))
def create_diff_file_attachment(self, filediff, from_modified=True,
review_request=None,
orig_filename='filename.png',
caption='My Caption',
mimetype='image/png',
**kwargs):
"""Creates a diff-based FileAttachment for testing.
The FileAttachment is tied to the given FileDiff. It's populated
with default data that can be overridden by the caller.
"""
file_attachment = FileAttachment.objects.create_from_filediff(
filediff=filediff,
from_modified=from_modified,
caption=caption,
orig_filename=orig_filename,
mimetype=mimetype,
**kwargs)
filename = os.path.join(settings.STATIC_ROOT, 'rb', 'images',
'logo.png')
with open(filename, 'r') as f:
file_attachment.file.save(filename, File(f), save=True)
if review_request:
review_request.file_attachments.add(file_attachment)
return file_attachment
def create_diffset(self, review_request=None, revision=1, repository=None,
draft=False, name='diffset'):
"""Creates a DiffSet for testing.
The DiffSet defaults to revision 1. This can be overriden by the
caller.
DiffSets generally are tied to a ReviewRequest, but it's optional.
"""
if review_request:
repository = review_request.repository
diffset = DiffSet.objects.create(
name=name,
revision=revision,
repository=repository,
diffcompat=DiffCompatVersion.DEFAULT)
if review_request:
if draft:
review_request_draft = \
ReviewRequestDraft.create(review_request)
review_request_draft.diffset = diffset
review_request_draft.save()
else:
review_request.diffset_history.diffsets.add(diffset)
return diffset
    def create_diff_comment(self, review, filediff, interfilediff=None,
                            text='My comment', issue_opened=False,
                            issue_status=None, first_line=1, num_lines=5,
                            extra_fields=None, reply_to=None, **kwargs):
        """Create a Comment for testing.
        The comment is tied to the given Review and FileDiff (and, optionally,
        an interfilediff). It's populated with default data that can be
        overridden by the caller.
        Args:
            review (reviewboard.reviews.models.review.Review):
                The review associated with the comment.
            filediff (reviewboard.diffviewer.models.FileDiff):
                The FileDiff associated with the comment.
            interfilediff (reviewboard.diffviewer.models.FileDiff, optional):
                The FileDiff used for the end of an interdiff range associated
                with the comment.
            text (unicode):
                The text for the comment.
            issue_opened (bool, optional):
                Whether an issue is to be opened for the comment.
            issue_status (unicode, optional):
                The issue status to set, if an issue is opened. Defaults to
                being an open issue.
            first_line (int, optional):
                The first line (0-based) of the comment range.
            num_lines (int, optional):
                The number of lines in the comment.
            extra_fields (dict, optional):
                Extra data to set on the comment.
            reply_to (reviewboard.reviews.models.diff_comment.Comment,
                      optional):
                The comment this comment replies to.
            **kwargs (dict):
                Additional model attributes to set on the comment.
        Returns:
            reviewboard.reviews.models.diff_comment.Comment:
                The resulting comment.
        """
        # Newly-opened issues default to the open state unless the caller
        # provided an explicit status.
        if issue_opened and not issue_status:
            issue_status = Comment.OPEN
        comment = Comment(
            filediff=filediff,
            interfilediff=interfilediff,
            first_line=first_line,
            num_lines=num_lines,
            text=text,
            issue_opened=issue_opened,
            issue_status=issue_status,
            reply_to=reply_to,
            **kwargs)
        # Assign extra_data before saving so it's part of the initial save.
        if extra_fields:
            comment.extra_data = extra_fields
        comment.save()
        review.comments.add(comment)
        return comment
def create_file_attachment(self, review_request,
orig_filename='filename.png',
caption='My Caption',
draft=False,
active=True,
**kwargs):
"""Creates a FileAttachment for testing.
The FileAttachment is tied to the given ReviewRequest. It's populated
with default data that can be overridden by the caller.
"""
file_attachment = self._create_base_file_attachment(
caption=caption,
orig_filename=orig_filename,
**kwargs)
if draft:
review_request_draft = ReviewRequestDraft.create(review_request)
if active:
attachments = review_request_draft.file_attachments
else:
attachments = review_request_draft.inactive_file_attachments
else:
if active:
attachments = review_request.file_attachments
else:
attachments = review_request.inactive_file_attachments
attachments.add(file_attachment)
return file_attachment
    def create_user_file_attachment(self, user,
                                    caption='My Caption',
                                    with_local_site=False,
                                    local_site_name=None,
                                    local_site=None,
                                    has_file=False,
                                    orig_filename='filename.png',
                                    **kwargs):
        """Create a user FileAttachment for testing.
        The :py:class:`reviewboard.attachments.models.FileAttachment` is tied
        to the given :py:class:`django.contrib.auth.models.User`. It's
        populated with default data that can be overridden by the caller.
        Notably, by default the FileAttachment will be created without a file
        or a local_site.
        Args:
            user (django.contrib.auth.models.User):
                The user who owns the file attachment.
            caption (unicode, optional):
                The caption for the file attachment.
            with_local_site (bool, optional):
                ``True`` if the file attachment should be associated with a
                local site. If this is set, one of ``local_site_name`` or
                ``local_site`` should be provided as well.
            local_site_name (unicode, optional):
                The name of the local site to associate this attachment with.
            local_site (reviewboard.site.models.LocalSite, optional):
                The local site to associate this attachment with.
            has_file (bool, optional):
                ``True`` if an actual file object should be included in the
                model.
            orig_filename (unicode, optional):
                The original name of the file to set in the model.
            kwargs (dict):
                Additional keyword arguments to pass into the FileAttachment
                constructor.
        Returns:
            reviewboard.attachments.models.FileAttachment:
                The new file attachment instance.
        """
        # Delegates to the shared base-attachment helper; unlike
        # create_file_attachment(), no review request association is made.
        return self._create_base_file_attachment(
            caption=caption,
            user=user,
            has_file=has_file,
            orig_filename=orig_filename,
            with_local_site=with_local_site,
            local_site_name=local_site_name,
            local_site=local_site,
            **kwargs)
    def create_file_attachment_comment(self, review, file_attachment,
                                       diff_against_file_attachment=None,
                                       text='My comment', issue_opened=False,
                                       issue_status=None, extra_fields=None,
                                       reply_to=None, **kwargs):
        """Create a FileAttachmentComment for testing.
        The comment is tied to the given Review and FileAttachment. It's
        populated with default data that can be overridden by the caller.
        Args:
            review (reviewboard.reviews.models.review.Review):
                The review associated with the comment.
            file_attachment (reviewboard.attachments.models.FileAttachment):
                The file attachment associated with the comment.
            diff_against_file_attachment (reviewboard.attachments.models.
                                          FileAttachment, optional):
                The file attachment being diff against, for comments on
                attachment diffs.
            text (unicode):
                The text for the comment.
            issue_opened (bool, optional):
                Whether an issue is to be opened for the comment.
            issue_status (unicode, optional):
                The issue status to set, if an issue is opened. Defaults to
                being an open issue.
            extra_fields (dict, optional):
                Extra data to set on the comment.
            reply_to (reviewboard.reviews.models.file_attachment_comment.
                      FileAttachmentComment, optional):
                The comment this comment replies to.
            **kwargs (dict):
                Additional model attributes to set on the comment.
        Returns:
            reviewboard.reviews.models.file_attachment_comment.FileAttachmentComment:
                The resulting comment.
        """
        # Newly-opened issues default to the open state unless the caller
        # provided an explicit status.
        if issue_opened and not issue_status:
            issue_status = FileAttachmentComment.OPEN
        comment = FileAttachmentComment(
            file_attachment=file_attachment,
            diff_against_file_attachment=diff_against_file_attachment,
            text=text,
            issue_opened=issue_opened,
            issue_status=issue_status,
            reply_to=reply_to,
            **kwargs)
        # Assign extra_data before saving so it's part of the initial save.
        if extra_fields:
            comment.extra_data = extra_fields
        comment.save()
        review.file_attachment_comments.add(comment)
        return comment
def create_filediff(self, diffset, source_file='/test-file',
dest_file='/test-file', source_revision='123',
dest_detail='124', status=FileDiff.MODIFIED,
diff=DEFAULT_FILEDIFF_DATA):
"""Creates a FileDiff for testing.
The FileDiff is tied to the given DiffSet. It's populated with
default data that can be overridden by the caller.
"""
return FileDiff.objects.create(
diffset=diffset,
source_file=source_file,
dest_file=dest_file,
source_revision=source_revision,
dest_detail=dest_detail,
status=status,
diff=diff)
def create_repository(self, with_local_site=False, name='Test Repo',
tool_name='Git', path=None, local_site=None,
**kwargs):
"""Creates a Repository for testing.
The Repository may optionally be attached to a LocalSite. It's also
populated with default data that can be overridden by the caller.
This accepts a tool_name of "Git", "Mercurial" or "Subversion".
The correct bundled repository path will be used for the given
tool_name.
"""
if not local_site:
if with_local_site:
local_site = self.get_local_site(name=self.local_site_name)
else:
local_site = None
testdata_dir = os.path.join(os.path.dirname(scmtools.__file__),
'testdata')
if not path:
if tool_name in ('Git', 'Test',
'TestToolSupportsPendingChangeSets'):
path = os.path.join(testdata_dir, 'git_repo')
elif tool_name == 'Subversion':
path = 'file://' + os.path.join(testdata_dir, 'svn_repo')
elif tool_name == 'Mercurial':
path = os.path.join(testdata_dir, 'hg_repo.bundle')
elif tool_name == 'CVS':
path = os.path.join(testdata_dir, 'cvs_repo')
else:
raise NotImplementedError
return Repository.objects.create(
name=name,
local_site=local_site,
tool=Tool.objects.get(name=tool_name),
path=path,
**kwargs)
    def create_review_request(self, with_local_site=False, local_site=None,
                              summary='Test Summary',
                              description='Test Description',
                              testing_done='Testing',
                              submitter='doc',
                              branch='my-branch',
                              local_id=1001,
                              bugs_closed='', status='P', public=False,
                              publish=False, commit_id=None, changenum=None,
                              repository=None, id=None,
                              create_repository=False):
        """Create a ReviewRequest for testing.
        The ReviewRequest may optionally be attached to a LocalSite. It's also
        populated with default data that can be overridden by the caller.
        If create_repository is True, a Repository will be created
        automatically. If set, a custom repository cannot be provided.
        The provided submitter may either be a username or a User object.
        If publish is True, ReviewRequest.publish() will be called.
        """
        if not local_site:
            if with_local_site:
                local_site = self.get_local_site(name=self.local_site_name)
            else:
                local_site = None
        # local_id is only meaningful within a LocalSite.
        if not local_site:
            local_id = None
        # create_repository and an explicit repository are mutually
        # exclusive.
        if create_repository:
            assert not repository
            repository = \
                self.create_repository(with_local_site=with_local_site)
        if not isinstance(submitter, User):
            submitter = User.objects.get(username=submitter)
        review_request = ReviewRequest(
            summary=summary,
            description=description,
            branch=branch,
            testing_done=testing_done,
            local_site=local_site,
            local_id=local_id,
            submitter=submitter,
            diffset_history=DiffSetHistory.objects.create(),
            repository=repository,
            public=public,
            commit_id=commit_id,
            changenum=changenum,
            bugs_closed=bugs_closed,
            status=status)
        # Set this separately to avoid issues with CounterField updates.
        review_request.id = id
        review_request.save()
        if publish:
            review_request.publish(review_request.submitter)
        return review_request
def create_visit(self, review_request, visibility, user='doc',
                 username=None, timestamp=None):
    """Create a ReviewRequestVisit for testing.

    The ReviewRequestVisit is tied to the given ReviewRequest and User.
    It's populated with default data that can be overridden by the caller.

    The provided user may either be a username or a User object.

    Note:
        The ``username`` and ``timestamp`` arguments are accepted but
        currently unused by this helper.
    """
    # Look the user up only when given a username, matching the other
    # create_* helpers. The previous check was inverted: it performed
    # the username lookup when given a User object and passed raw
    # username strings straight through to the model.
    if not isinstance(user, User):
        user = User.objects.get(username=user)

    return ReviewRequestVisit.objects.create(
        review_request=review_request,
        visibility=visibility,
        user=user)
def create_review(self, review_request, user='dopey',
                  body_top='Test Body Top', body_bottom='Test Body Bottom',
                  ship_it=False, publish=False, timestamp=None, **kwargs):
    """Create a Review for testing.

    The review is tied to the given ReviewRequest and populated with
    default data that the caller may override.

    Args:
        review_request (reviewboard.reviews.models.review_request.
                        ReviewRequest):
            The review request the review is filed against.

        user (unicode or django.contrib.auth.models.User, optional):
            The username or User object owning the review.

        body_top (unicode, optional):
            The text for the ``body_top`` field.

        body_bottom (unicode, optional):
            The text for the ``body_bottom`` field.

        ship_it (bool, optional):
            The Ship It state for the review.

        publish (bool, optional):
            Whether to publish the review immediately after creation.

        timestamp (datetime.datetime, optional):
            The timestamp for the review.

        **kwargs (dict):
            Additional attributes to set in the review.

    Returns:
        reviewboard.reviews.models.review.Review:
        The resulting review.
    """
    if not isinstance(user, User):
        user = User.objects.get(username=user)

    review_kwargs = dict(kwargs,
                         review_request=review_request,
                         user=user,
                         body_top=body_top,
                         body_bottom=body_bottom,
                         ship_it=ship_it)
    review = Review.objects.create(**review_kwargs)

    if publish:
        review.publish()

    if timestamp:
        # Update the row directly so save()-time logic can't override
        # the requested timestamp, then mirror it on the instance.
        Review.objects.filter(pk=review.pk).update(timestamp=timestamp)
        review.timestamp = timestamp

    return review
def create_review_group(self, name='test-group', with_local_site=False,
                        local_site=None, visible=True, invite_only=False,
                        is_default_group=False):
    """Create a review group for testing.

    The group may optionally be attached to a LocalSite, and is populated
    with default data that the caller may override.
    """
    if with_local_site and not local_site:
        local_site = self.get_local_site(name=self.local_site_name)

    group_kwargs = {
        'name': name,
        'local_site': local_site,
        'visible': visible,
        'invite_only': invite_only,
        'is_default_group': is_default_group,
    }

    return Group.objects.create(**group_kwargs)
def create_reply(self, review, user='grumpy', username=None,
                 body_top='Test Body Top', timestamp=None,
                 publish=False):
    """Create a review reply for testing.

    The reply is tied to the given Review. It's populated with default
    data that can be overridden by the caller.

    Note:
        The ``username`` argument is accepted but currently unused by
        this helper.
    """
    if not isinstance(user, User):
        user = User.objects.get(username=user)

    reply = Review.objects.create(
        review_request=review.review_request,
        user=user,
        body_top=body_top,
        base_reply_to=review)

    if publish:
        reply.publish()

    if timestamp:
        # Apply the timestamp after create()/publish() through a direct
        # queryset update, the same way create_review() does. Passing it
        # to objects.create() let later save() calls clobber it.
        Review.objects.filter(pk=reply.pk).update(timestamp=timestamp)
        reply.timestamp = timestamp

    return reply
def create_screenshot(self, review_request, caption='My caption',
                      draft=False, active=True):
    """Create a Screenshot for testing.

    The Screenshot is tied to the given ReviewRequest. It's populated
    with default data that can be overridden by the caller.

    Args:
        review_request (reviewboard.reviews.models.review_request.
                        ReviewRequest):
            The review request the screenshot is attached to.

        caption (unicode, optional):
            The screenshot's caption.

        draft (bool, optional):
            Whether to attach the screenshot to a draft of the review
            request instead of the review request itself.

        active (bool, optional):
            Whether the screenshot is added as active (rather than
            inactive).

    Returns:
        reviewboard.reviews.models.screenshot.Screenshot:
        The resulting screenshot.
    """
    screenshot = Screenshot(caption=caption)
    filename = os.path.join(settings.STATIC_ROOT, 'rb', 'images',
                            'logo.png')

    # Open the PNG in binary mode. Text mode ('r') can corrupt binary
    # data through newline translation and fails outright on Python 3.
    with open(filename, 'rb') as f:
        screenshot.image.save(filename, File(f), save=True)

    if draft:
        review_request_draft = ReviewRequestDraft.create(review_request)

        if active:
            screenshots = review_request_draft.screenshots
        else:
            screenshots = review_request_draft.inactive_screenshots
    else:
        if active:
            screenshots = review_request.screenshots
        else:
            screenshots = review_request.inactive_screenshots

    screenshots.add(screenshot)

    return screenshot
def create_screenshot_comment(self, review, screenshot, text='My comment',
                              x=1, y=1, w=5, h=5, issue_opened=False,
                              issue_status=None, extra_fields=None,
                              reply_to=None, **kwargs):
    """Create a ScreenshotComment for testing.

    The comment is tied to the given Review and Screenshot, and is
    populated with default data that the caller may override.

    Args:
        review (reviewboard.reviews.models.review.Review):
            The review the comment is filed on.

        screenshot (reviewboard.reviews.models.screenshot.Screenshot):
            The screenshot being commented on.

        text (unicode):
            The comment text.

        x (int, optional):
            X coordinate of the comment region on the screenshot.

        y (int, optional):
            Y coordinate of the comment region on the screenshot.

        w (int, optional):
            Width of the comment region.

        h (int, optional):
            Height of the comment region.

        issue_opened (bool, optional):
            Whether an issue is opened for the comment.

        issue_status (unicode, optional):
            The issue status to set when an issue is opened. Defaults to
            an open issue.

        extra_fields (dict, optional):
            Extra data to store on the comment.

        reply_to (reviewboard.reviews.models.screenshot_comment.
                  ScreenshotComment, optional):
            The comment this comment replies to.

        **kwargs (dict):
            Additional model attributes to set on the comment.

    Returns:
        reviewboard.reviews.models.screenshot_comment.ScreenshotComment:
        The resulting comment.
    """
    if issue_opened:
        issue_status = issue_status or ScreenshotComment.OPEN

    comment = ScreenshotComment(screenshot=screenshot,
                                text=text,
                                x=x,
                                y=y,
                                w=w,
                                h=h,
                                issue_opened=issue_opened,
                                issue_status=issue_status,
                                reply_to=reply_to,
                                **kwargs)

    if extra_fields:
        comment.extra_data = extra_fields

    comment.save()
    review.screenshot_comments.add(comment)

    return comment
def _create_base_file_attachment(self,
                                 caption='My Caption',
                                 orig_filename='filename.png',
                                 has_file=True,
                                 user=None,
                                 with_local_site=False,
                                 local_site_name=None,
                                 local_site=None,
                                 **kwargs):
    """Create a FileAttachment object with the given parameters.

    When creating a
    :py:class:`reviewboard.attachments.models.FileAttachment` that will be
    associated to a review request, a user and local_site should not be
    specified.

    Args:
        caption (unicode, optional):
            The caption for the file attachment.

        orig_filename (unicode, optional):
            The original name of the file to set in the model.

        has_file (bool, optional):
            ``True`` if an actual file object should be included in the
            model.

        user (django.contrib.auth.models.User, optional):
            The user who owns the file attachment.

        with_local_site (bool, optional):
            ``True`` if the file attachment should be associated with a
            local site. If this is set, one of ``local_site_name`` or
            ``local_site`` should be provided as well.

        local_site_name (unicode, optional):
            The name of the local site to associate this attachment with.

        local_site (reviewboard.site.models.LocalSite, optional):
            The local site to associate this attachment with.

        kwargs (dict):
            Additional keyword arguments to pass into the FileAttachment
            constructor.

    Returns:
        reviewboard.attachments.models.FileAttachment:
        The new file attachment instance.
    """
    if with_local_site:
        local_site = self.get_local_site(name=local_site_name)

    file_attachment = FileAttachment(
        caption=caption,
        user=user,
        uuid='test-uuid',
        local_site=local_site,
        **kwargs)

    if has_file:
        filename = os.path.join(settings.STATIC_ROOT, 'rb', 'images',
                                'logo.png')

        file_attachment.orig_filename = orig_filename
        file_attachment.mimetype = 'image/png'

        # Open the PNG in binary mode. Text mode ('r') can corrupt
        # binary data through newline translation and fails on Python 3.
        with open(filename, 'rb') as f:
            file_attachment.file.save(filename, File(f), save=True)

    file_attachment.save()

    return file_attachment
def create_general_comment(self, review, text='My comment',
                           issue_opened=False, issue_status=None,
                           extra_fields=None, reply_to=None, **kwargs):
    """Create a GeneralComment for testing.

    The comment is tied to the given Review, and is populated with
    default data that the caller may override.

    Args:
        review (reviewboard.reviews.models.review.Review):
            The review the comment is filed on.

        text (unicode):
            The comment text.

        issue_opened (bool, optional):
            Whether an issue is opened for the comment.

        issue_status (unicode, optional):
            The issue status to set when an issue is opened. Defaults to
            an open issue.

        extra_fields (dict, optional):
            Extra data to store on the comment.

        reply_to (reviewboard.reviews.models.general_comment.
                  GeneralComment, optional):
            The comment this comment replies to.

        **kwargs (dict):
            Additional model attributes to set on the comment.

    Returns:
        reviewboard.reviews.models.general_comment.GeneralComment:
        The resulting comment.
    """
    if issue_opened:
        issue_status = issue_status or GeneralComment.OPEN

    comment = GeneralComment(text=text,
                             issue_opened=issue_opened,
                             issue_status=issue_status,
                             reply_to=reply_to,
                             **kwargs)

    if extra_fields:
        comment.extra_data = extra_fields

    comment.save()
    review.general_comments.add(comment)

    return comment
def create_status_update(self, review_request, user='dopey',
                         service_id='service', summary='Status Update',
                         state=StatusUpdate.PENDING,
                         review=None,
                         change_description=None):
    """Create a status update for testing.

    It is populated with default data that the caller may override.

    Args:
        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request to associate with the new status update.

        user (django.contrib.auth.models.User or unicode):
            Either the user model or the username of the user who should
            own the status update.

        service_id (unicode):
            The ID to fill in for the new model.

        summary (unicode):
            The summary to fill in for the new model.

        state (unicode):
            The state for the new model. This must be one of the valid
            choices for the state field.

        review (reviewboard.reviews.models.review.Review, optional):
            The review associated with this status update.

        change_description (reviewboard.changedescs.models.
                            ChangeDescription, optional):
            The change description for this status update.

    Returns:
        reviewboard.reviews.models.StatusUpdate:
        The new status update.
    """
    if not isinstance(user, User):
        user = User.objects.get(username=user)

    status_update_kwargs = {
        'review_request': review_request,
        'change_description': change_description,
        'service_id': service_id,
        'summary': summary,
        'state': state,
        'review': review,
        'user': user,
    }

    return StatusUpdate.objects.create(**status_update_kwargs)
def create_webhook(self, enabled=False, events=WebHookTarget.ALL_EVENTS,
                   url='http://example.com',
                   encoding=WebHookTarget.ENCODING_JSON,
                   use_custom_content=False, custom_content='',
                   secret='', apply_to=WebHookTarget.APPLY_TO_ALL,
                   repositories=None, with_local_site=False,
                   local_site=None, extra_fields=None):
    """Create a webhook for testing.

    It is populated with default data that the caller may override.

    Args:
        enabled (bool):
            Whether the webhook is enabled when it is created.

        events (unicode):
            A comma-separated list of events that the webhook will
            trigger on.

        url (unicode):
            The URL that requests will be made against.

        encoding (unicode):
            The encoding of the payload to send.

        use_custom_content (bool):
            Whether custom content will be sent for the payload (if
            ``True``) or auto-generated (if ``False``).

        custom_content (unicode):
            The custom content to send when ``use_custom_content`` is
            ``True``.

        secret (unicode):
            An HMAC secret to sign the payload with.

        apply_to (unicode):
            The types of repositories the webhook will apply to.

        repositories (list):
            A list of repositories that the webhook will be limited to
            if ``apply_to`` is ``WebHookTarget.APPLY_TO_SELECTED_REPOS``.

        with_local_site (bool):
            Whether this should be created with a local site.

        local_site (reviewboard.site.models.LocalSite):
            An optional local site. If ``with_local_site`` is ``True``
            and this argument is ``None``, the local site will be looked
            up.

        extra_fields (dict):
            Extra data to be imported into the webhook.

    Returns:
        WebHookTarget: A webhook constructed with the given arguments.
    """
    if not local_site:
        local_site = (self.get_local_site(name=self.local_site_name)
                      if with_local_site
                      else None)

    webhook = WebHookTarget.objects.create(
        enabled=enabled,
        events=events.split(','),
        url=url,
        encoding=encoding,
        use_custom_content=use_custom_content,
        custom_content=custom_content,
        secret=secret,
        apply_to=apply_to,
        local_site=local_site)

    if repositories:
        webhook.repositories = repositories

    if extra_fields:
        webhook.extra_data = extra_fields
        webhook.save(update_fields=['extra_data'])

    return webhook
def create_oauth_application(
    self, user, local_site=None, with_local_site=False,
    redirect_uris='http://example.com',
    authorization_grant_type=Application.GRANT_CLIENT_CREDENTIALS,
    client_type=Application.CLIENT_PUBLIC,
    **kwargs):
    """Create an OAuth application for testing.

    Args:
        user (django.contrib.auth.models.User):
            The user who is to own the application.

        local_site (reviewboard.site.models.LocalSite, optional):
            The LocalSite for the application to be associated with, if
            any.

        redirect_uris (unicode, optional):
            A whitespace-separated list of allowable redirect URIs.

        authorization_grant_type (unicode, optional):
            The grant type for the application.

        client_type (unicode, optional):
            The application client type.

        **kwargs (dict):
            Additional keyword arguments to pass to the
            :py:class:`~reviewboard.oauth.models.Application` initializer.

    Returns:
        reviewboard.oauth.models.Application:
        The created application.
    """
    if not local_site:
        local_site = (self.get_local_site(self.local_site_name)
                      if with_local_site
                      else None)

    return Application.objects.create(
        user=user,
        local_site=local_site,
        authorization_grant_type=authorization_grant_type,
        redirect_uris=redirect_uris,
        client_type=client_type,
        extra_data='{}',
        **kwargs)
def create_oauth_token(self, application, user, scope='', expires=None,
                       **kwargs):
    """Create an OAuth2 access token for testing.

    Args:
        application (reviewboard.oauth.models.Application):
            The application the token should be associated with.

        user (django.contrib.auth.models.User):
            The user who should own the token.

        scope (unicode, optional):
            The scopes of the token. This argument defaults to the empty
            scope.

        expires (datetime.timedelta, optional):
            How far into the future the token expires. If not provided,
            this argument defaults to one hour.

        **kwargs (dict):
            Additional field values to set on the token.

    Returns:
        oauth2_provider.models.AccessToken:
        The created access token.
    """
    if expires is None:
        expires = timedelta(hours=1)

    # Forward **kwargs to the model; previously they were accepted in
    # the signature but silently discarded.
    return AccessToken.objects.create(
        application=application,
        token=generate_token(),
        expires=timezone.now() + expires,
        scope=scope,
        user=user,
        **kwargs)
| {
"content_hash": "8fa4548ac429a1bc381f4c55ed38540b",
"timestamp": "",
"source": "github",
"line_count": 1131,
"max_line_length": 85,
"avg_line_length": 36.315649867374006,
"alnum_prop": 0.5581282107467193,
"repo_name": "brennie/reviewboard",
"id": "bcb7a00473d82b77e2cade4699da0041398a921e",
"size": "41073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/testing/testcase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "247208"
},
{
"name": "HTML",
"bytes": "204351"
},
{
"name": "JavaScript",
"bytes": "2557855"
},
{
"name": "Python",
"bytes": "5241630"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
} |
from obspy.core import UTCDateTime as UTC
from sito.data import IPOC
from sito.noisexcorr import (prepare, get_correlations,
plotXcorrs, noisexcorrf, stack)
from sito import util
import matplotlib.pyplot as plt
from sito.stream import read
from multiprocessing import Pool
import time
from sito import seismometer
def main():
    """Prepare IPOC noise data, compute daily cross-correlations and plot.

    Workflow: select stations and a time window, preprocess the raw data
    (band-pass filtering, downsampling, 1-bit normalization), compute
    daily noise cross-correlation functions in parallel, and plot the
    results. Commented-out lines are alternative parameter sets kept for
    reference from earlier experiment runs.
    """
    # Only the last 'stations' assignment takes effect; the earlier ones
    # are kept as a record of previously processed station sets.
    stations = 'PB01 PB02 PB03 PB04 PB05 PB06 PB07 PB08 HMBCX MNMCX PATCX PSGCX LVC'
    stations = 'PB09 PB10 PB11 PB12 PB13 PB14 PB15 PB16'
    stations = 'PATCX'
    stations2 = None
    components = 'Z'
    # TOcopilla earthquake: 2007-11-14 15:14
    t1 = UTC('2007-10-01')
    t2 = UTC('2007-11-30')
    #t2 = UTC('2012-10-01')
    #t2 = UTC('2011-12-31')
    # t1 = UTC('2009-05-01')
    # t2 = UTC('2009-05-03')
    # Correlation window shift in seconds (the second assignment wins).
    shift = 100
    shift = 60
    #correlations = get_correlations(stations, components, stations2, only_auto=True)
    correlations = get_correlations(stations, components, stations2)
    print correlations
    method = 'zerotest_nozero'
    #method = 'FINAL_filter4-6_1bit_auto_3C'
    #method = 'FINAL_filter3-5'
    data = IPOC(xcorr_append='/' + method, use_local_LVC=False)
    data.setXLogger('_' + method)
    # Preprocess day files in parallel: 4-6 Hz band-pass, downsample to
    # 50 Hz, 1-bit normalization, no event removal, no whitening.
    pool = Pool()
    prepare(data, stations.split(), t1, t2, component=components,
            filter=(4, 6, 2, True), downsample=50,
            #eventremoval='waterlevel_env2', param_removal=(10, 0),
            eventremoval=None, param_removal=None,
            whitening=False,
            normalize='1bit', param_norm=None,
            pool=pool)
    # Daily (24 h) cross-correlation functions.
    noisexcorrf(data, correlations, t1, t2, shift, period=24 * 3600, pool=pool)
    # noisexcorrf(data, correlations, t1, t2, shift, period=5 * 60, pool=pool,
    #             max_preload=1000)
    pool.close()
    pool.join()
    # plotXcorrs(data, correlations, t1, t2, start=None, end=None, plot_overview=True, plot_years=False, use_dlognorm=False,
    #            plot_stack=True, plot_psd=False, add_to_title='', downsample=None)
    #plt.rc('font', size=16)
    plotXcorrs(data, correlations, t1, t2, start=-20, end=20, plot_overview=True, plot_years=False, use_dlognorm=False,
               plot_stack=True, plot_psd=False, downsample=None, ext='_hg0.02_dis.pdf', vmax=0.02,
               add_to_title='4-6Hz', ylabel=None)
    # stack(data, correlations, dt= -1)
    #stack(data, correlations, dt=60 * 60, period=5 * 60)
    # stack(data, correlations, dt=24 * 60 * 60, period=5 * 60)
    # plotXcorrs(data, correlations, t1=None, t2=None, start=None, end=None, plot_overview=True, plot_years=False, use_dlognorm=False,
    #            plot_stack=True, plot_psd=False, add_to_title='', downsample=None,
    #            stack=('10days', '2days'))
    # plotXcorrs(data, correlations, t1, t2, start=0, end=20, plot_overview=True, plot_years=False, use_dlognorm=False,
    #            plot_stack=True, plot_psd=False, add_to_title='', downsample=None,
    #            period=60 * 5, stack=(60 * 60, None), ext='_hg.png', vmax=0.1)
    # util.checkDir(data.getPlotX(('', ''), t1))
    #for correlation in correlations:
    #    stations = correlation[0][:-1], correlation[1][:-1]
    #    dist = data.stations.dist(*stations)
    ##    if dist >= 120:
    ##        t = (dist // 100) * 50 + 50
    ##    else:
    ##        t = 70
    #    t = 200
    #    stream = data.readDayXcorr(correlation, t1, t2)
    #    if len(stream) > 0:
    #        stream.plotXcorr(-t, t, imshow=True, vmax=0.01, vmin_rel='vmax',
    #                         fig=plt.figure(figsize=(8.267, 11.693)),
    #                         figtitle='station ' + method + ' around Tocopilla event',
    #                         dateformatter='%y-%m-%d', show=False,
    #                         save=data.getPlotX(correlation, 'Tocopilla_0.01.png'),
    #                         stack_lim=None)
    #
    # method = 'rm5_filter0.1-1'
    # data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True)
    # data.setXLogger('_' + method)
    # prepare(data, stations.split(' '), t1, t2, filter=(0.1, 1.), downsample=10,
    #         component=components, normalize='runningmean', norm_param=5 * 10 + 1,
    #         use_floating_stream=True)
    # xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True)
    # plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method)
    #
    #
    # method = 'rm50_filter0.01'
    # data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True)
    # data.setXLogger('_' + method)
    # prepare(data, stations.split(' '), t1, t2, filter=(0.01, None), downsample=None,
    #         component=components, normalize='runningmean', norm_param=50 * 100 + 1,
    #         use_floating_stream=True)
    # xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True)
    # plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method)
    #
    #
    # method = 'rm0.25_filter2'
    # data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True)
    # data.setXLogger('_' + method)
    # prepare(data, stations.split(' '), t1, t2, filter=(2, None), downsample=None,
    #         component=components, normalize='runningmean', norm_param=100 // 4 + 1,
    #         use_floating_stream=True)
    # xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True)
    # plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method)
# Run the correlation workflow only when executed as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "cad64db182a1665ea1f681ecc2ec9a58",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 133,
"avg_line_length": 44.91869918699187,
"alnum_prop": 0.6079638009049774,
"repo_name": "trichter/sito",
"id": "7567fe4d6f0561a90507bd198cae244886417428",
"size": "5555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/noise/noise_s_final_autocorr3.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6517"
},
{
"name": "Fortran",
"bytes": "135891"
},
{
"name": "Python",
"bytes": "930600"
},
{
"name": "Shell",
"bytes": "192"
}
],
"symlink_target": ""
} |
import io
import os
from unittest import TestCase
def get_path(path):
    """Return *path* resolved relative to this test module's directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, path)
class SyamlDefaultReadTest(TestCase):
    """Tests for the default reader built by SyamlReaderFactory.

    Each test parses ``syaml_test.yaml`` through a different input kind
    (file object, in-memory stream, path string) and checks the context
    values (``here``, ``name``, ``path``) the reader injects.
    """

    def _call_fut(self, *args, **kwds):
        # Function under test: a reader created by the factory.
        from .. import syaml
        create_reader = syaml.SyamlReaderFactory()
        reader = create_reader()
        return reader(*args, **kwds)

    def _assert_parsed(self, obj, here, name, path):
        # Shared assertions for the parsed document; previously this
        # four-assert sequence was duplicated in every test method.
        self.assertEqual(obj[0]['here'], here)
        self.assertEqual(obj[1]['name'], name)
        self.assertEqual(obj[2]['path'], path)
        self.assertEqual(obj[3]['test'], 'OK')

    def test_fileobj(self):
        # A real file object exposes its filesystem context.
        yaml_path = get_path('./syaml_test.yaml')

        with open(yaml_path, 'rb') as fp:
            obj = self._call_fut(fp)

        path = os.path.abspath(yaml_path)
        self._assert_parsed(obj,
                            here=os.path.dirname(path),
                            name=os.path.basename(path),
                            path=path)

    def test_bytesio(self):
        # An in-memory stream has no filesystem context; all three
        # context values come back empty.
        yaml_path = get_path('./syaml_test.yaml')

        with open(yaml_path, 'rb') as fp:
            with io.BytesIO(fp.read()) as dp:
                dp.seek(0)
                obj = self._call_fut(dp)

        self._assert_parsed(obj, here='', name='', path='')

    def test_filepath(self):
        # A plain path string exposes that path's context.
        yaml_path = get_path('./syaml_test.yaml')
        obj = self._call_fut(yaml_path)

        path = os.path.abspath(yaml_path)
        self._assert_parsed(obj,
                            here=os.path.dirname(path),
                            name=os.path.basename(path),
                            path=path)
| {
"content_hash": "bff2145d63119024d6f5eed5c8ba72bf",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 51,
"avg_line_length": 29.081967213114755,
"alnum_prop": 0.5535512965050733,
"repo_name": "TakesxiSximada/syaml",
"id": "25ebe45f25be7bb07deccb09acf790488985af13",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/syaml/tests/test_syaml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "896"
},
{
"name": "Python",
"bytes": "10832"
}
],
"symlink_target": ""
} |
class Counter(object):
    """A countdown counter with optional progress printing.

    The counter starts at ``init`` and counts down via :py:meth:`decrease`.
    When ``notification_interval`` is non-zero, progress is printed at
    construction and then every ``notification_interval`` decrements.
    """

    def __init__(self, init=0, notification_interval=1000):
        super(Counter, self).__init__()
        self.init = init
        self.cnt = init
        self.notification_interval = notification_interval

        if notification_interval:
            done = self.init - self.cnt
            print("Start counting: %d/%d" % (done, self.init))

    def decrease(self):
        """Count down by one, printing progress at each interval boundary."""
        self.cnt -= 1
        done = self.init - self.cnt

        interval = self.notification_interval
        if interval and done % interval == 0:
            print("Current: %d/%d" % (done, self.init))

    def is_equal_or_below(self, value):
        """Return True if the current count is <= *value*."""
        return self.cnt <= value

    def reset(self):
        """Restore the counter to its initial value."""
        self.cnt = self.init
"content_hash": "ece2548c6eb666efffbb2413c185ac2b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 99,
"avg_line_length": 36.26315789473684,
"alnum_prop": 0.6008708272859217,
"repo_name": "weijia/django-excel-to-model",
"id": "314fb5dd317a6d1baf2f96829def8ee448abe2af",
"size": "691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_excel_to_model/management/commands/utils/counter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1774"
},
{
"name": "Makefile",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "49812"
}
],
"symlink_target": ""
} |
import subprocess
import glob
import os
import re
import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import benchmark_configuration as config
def deleteFiles(pattern):
    """Delete every regular file in the current working directory matching *pattern*."""
    matches = glob.glob(os.path.join(os.getcwd(), pattern))
    for match in matches:
        if os.path.isfile(match):
            os.remove(match)
def deleteFile(filePath):
    """Delete *filePath* if it refers to an existing regular file; otherwise do nothing."""
    is_regular_file = os.path.isfile(filePath)
    if is_regular_file:
        os.remove(filePath)
def deleteDataFiles():
    """Remove all generated data* files from the working directory."""
    deleteFiles('data*')
def deleteIndexFiles():
    """Remove all generated index* files from the working directory."""
    deleteFiles('index*')
def performCleanup():
    """Remove all generated benchmark artifacts (data* and index* files)."""
    deleteDataFiles()
    deleteIndexFiles()
# In case you don't want to use timestamp as suffix set the flag
# 'appendTimeStampInBenchmarkFolder = False'
if config.appendTimeStampInBenchmarkFolder:
# Update the output parents with the folder name plus time stamps
timestamp = time.time()
stringTimeStamp = datetime.datetime.fromtimestamp(timestamp).strftime('%Y%m%d_%H%M%S')
config.xy_output_parent = config.xy_output_parent + '_' + stringTimeStamp
config.scatter_output_parent = config.scatter_output_parent + '_' + stringTimeStamp
xy_file_path = config.xy_parent_folder + os.path.sep + config.xy_sim_file + "." + config.xy_extension
xy_output_file_path = config.xy_output_parent + os.path.sep + config.xy_output_file + "." + config.xy_log_output_extension
time_output_file_path = config.xy_output_parent + os.path.sep + config.time_output_file + "." + config.xy_output_extension
data_written_output_file_path = config.xy_output_parent + os.path.sep + config.data_written_output_file + "." + config.xy_output_extension
data_read_output_file_path = config.xy_output_parent + os.path.sep + config.data_read_output_file + "." + config.xy_output_extension
scatter_file_path = config.scatter_parent_folder + os.path.sep + config.scatter_sim_file + "." + config.xy_extension
gc_file_path = config.xy_parent_folder + os.path.sep + config.xy_sim_file + "." + config.xy_extension
figCount = 1;
#Run a cleanup incase anything was already generated
performCleanup()
gc_benchmark_initialised = False
# Run the following set of instructions for all possible VM Arguments
if config.xy_plots:
total_gc_time_for_all_runs = 0.0
gc_benchmark_file = None
gc_benchmark_file_path = config.xy_output_parent + os.path.sep + config.gc_output_file + "_" + config.xy_sim_file + "." + config.xy_output_extension
for key in sorted(config.xy_vm_argument):
print "-----------------------------------------------------------\n"
print "Running with Heap Size : "+ str(key)+"MB" + "\n"
# vm_argument = "-Xmx" + str(key)+"M"
# -Xmx50M -Xloggc:benchmark/gc1.log -verbose:gc -XX:+PrintGCDetails
heap_size = "-Xmx" + str(key)+"M"
gc_output_log_file_path = config.xy_output_parent + os.path.sep + config.gc_log_file + "_" + config.xy_sim_file + "_" + str(key) + "m" + "." + config.gc_log_extension
gc_log = "-Xloggc:"+ gc_output_log_file_path
# Create the directory already because Java won't create it for Log files
directory = os.path.dirname(gc_output_log_file_path)
if not os.path.exists(directory):
os.makedirs(directory)
verbose_gc = "-verbose:gc"
print_gc = "-XX:+PrintGCDetails"
total_gc_time_for_all_runs = 0.0
# Perform Cleanup - Delete GC Log if it exists
deleteFile(gc_output_log_file_path)
for i in xrange(0,config.runs):
print "Run Count :" + str(i+1) + "\n"
# This will simulate the calling of "java -Xmx50M -cp build:lib/* jitd.benchmark.BenchmarkGenerator"
#p = subprocess.Popen(["java", vm_argument, "-cp", config.classpath,"jitd.benchmark.BenchmarkGenerator", xy_file_path, xy_output_file_path])
p = subprocess.Popen(["java", heap_size, gc_log, verbose_gc, print_gc, "-cp", config.classpath,"jitd.benchmark.BenchmarkGenerator",xy_file_path, xy_output_file_path])
# Wait for the above process to complete.
# Removing this statement might cause following instructions to run before the previous command completes executions
p.wait()
print "Running Cleanup operations for Run "+str(i+1)+"\n"
# Delete all the generated data files
performCleanup()
print "Cleanup operations for Run "+str(i+1)+"\n"
time.sleep(5)
# Analyzing the logs
print "Analyzing the GC Log for Heap Size : "+ str(key)+"MB" + "\n"
gc_time = 0
if not gc_benchmark_initialised:
gc_benchmark_file = open(gc_benchmark_file_path, "w")
gc_benchmark_file.write("Heap Size (in MB),Time spent in Garbage Collection(in seconds)\n")
gc_benchmark_initialised = True
with open(gc_output_log_file_path) as f:
for line in f:
# If line starts with decimal
if re.match("^\d+\.\d+",line):
# Find all decimals, we will need 1st in all decimals
decimals = re.findall("\d+\.\d+", line)
if len(decimals) > 1:
# print decimals[1]
gc_time = gc_time + float(decimals[1])
print "Time taken in Garbage Collection Run "+str(i+1)+"\n"
total_gc_time_for_all_runs = total_gc_time_for_all_runs + gc_time
#print "\n"
average_gc_time = total_gc_time_for_all_runs / config.runs
print "Average Total Time spent in GC for Heap Size of " + str(key)+"MB :" + str(average_gc_time) + " seconds"
gc_benchmark_file.write(str(key)+","+str(average_gc_time)+"\n")
print "-----------------------------------------------------------\n"
# Close the file
gc_benchmark_file.close()
print "All the runs have completed successfully\n"
print "\n"
if config.gc_plots:
# Plot the graph
# GC Time vs Heap Size
figure = plt.figure(figCount)
data = np.genfromtxt(gc_benchmark_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
plt.plot(data['x'], data['y'],'o-')
plt.xlabel("Heap Size (in MB)")
plt.ylabel("Time spent in Garbage Collection (in seconds)")
plt.title("Time spent in Garbage Collection on different Heap Sizes")
plt.grid(True)
figure.savefig(config.xy_output_parent+os.path.sep+'gc-time-vs-heap-size-'+config.xy_sim_file+'.png')
figure.show()
figCount = figCount + 1
print "Fetching data from the logs to generate averaged data.\n"
print "\n"
# Call the program to Analyze the generated log and put it in a CSV
p = subprocess.Popen(["java", "-cp", config.classpath,"jitd.benchmark.BenchmarkLogAnalyzer",xy_output_file_path, time_output_file_path, data_written_output_file_path, data_read_output_file_path])
p.wait()
print "Data Calculation completed."
print "Generating graphs"
# Calculate the generated CSV File names based on the scatter
# Plot the graphs
if config.xy_heap_vs_time:
figure1 = plt.figure(figCount)
# Time vs Heap Size Graph
data = np.genfromtxt(time_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
plt.plot(data['x'], data['y'],'o-')
plt.xlabel("Heap Size (in MB)")
plt.ylabel("Time Taken (in seconds)")
plt.title("Time taken in cracker mode on different Heap Sizes")
plt.grid(True)
figure1.savefig(config.xy_output_parent+os.path.sep+'time-vs-heap-size-'+config.xy_sim_file+'.png')
figure1.show()
figCount = figCount + 1
if config.xy_heap_vs_data_written:
# Data Written vs Heap Size
figure2 = plt.figure(figCount)
data = np.genfromtxt(data_written_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
plt.plot(data['x'], data['y'],'o-')
plt.xlabel("Heap Size (in MB)")
plt.ylabel("Total data written to disk (in MB)")
plt.title("Total data written to disk on different Heap Sizes")
plt.grid(True)
figure2.savefig(config.xy_output_parent+os.path.sep+'bytes-written-vs-heap-size-'+config.xy_sim_file+'.png')
figure2.show()
figCount = figCount + 1
if config.xy_heap_vs_data_read:
# Data Read vs Heap Size
figure3 = plt.figure(figCount)
data = np.genfromtxt(data_read_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
plt.plot(data['x'], data['y'],'o-')
plt.xlabel("Heap Size (in MB)")
plt.ylabel("Total data read from the disk (in MB)")
plt.title("Total data read from disk on different Heap Sizes")
plt.grid(True)
figure3.savefig(config.xy_output_parent+os.path.sep+'bytes-read-vs-heap-size-'+config.xy_sim_file+'.png')
figure3.show()
figCount = figCount + 1
if config.total_time_vs_gc_time:
figure = plt.figure(figCount)
ax = figure.add_subplot(111)
# Time vs Heap Size Graph
data1 = np.genfromtxt(time_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
data2 = np.genfromtxt(gc_benchmark_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
index = np.arange(len(data1))
width = 0.25
rects1 = ax.bar(index, data1['y'], width, color = 'b')
rects2 = ax.bar(index + width, data2['y'], width, color = 'r')
ax.set_xlabel("Heap Size (in MB)")
ax.set_xticks(index + width)
ax.set_xticklabels(data1['x'])
ax.set_ylabel("Time Taken (in seconds)")
ax.set_title("Time taken in cracker mode on different Heap Sizes")
ax.legend((rects1[0], rects2[0]), ('Total Runtime', 'Garbage Collection'))
ax.grid(True)
figure.savefig(config.xy_output_parent+os.path.sep+'gc-time-total-runtime-vs-heap-size-'+config.xy_sim_file+'.png')
figure.show()
figCount = figCount + 1
# Make sure all data files are deleted before exiting
# Delete all the generated data files
performCleanup()
# Generate the scatter plots
if config.scatter_plots:
gc_benchmark_file_path = config.scatter_output_parent + os.path.sep + config.gc_output_file + "_" + config.scatter_sim_file + "." + config.scatter_output_extension
gc_benchmark_initialised = False
total_runtime_list = []
idx = 0
for key in sorted(config.scatter_vm_argument):
# vm_argument = "-Xmx" + str(key)+"M"
# vm_argument = "-Xmx" + str(key)+"M"
# -Xmx50M -Xloggc:benchmark/gc1.log -verbose:gc -XX:+PrintGCDetails
heap_size = "-Xmx" + str(key)+"M"
gc_output_log_file_path = config.scatter_output_parent + os.path.sep + config.gc_log_file + "_" + config.scatter_sim_file + "_" + str(key) + "m" + "." + config.gc_log_extension
gc_log = "-Xloggc:"+ gc_output_log_file_path
# Create the directory already because Java won't create it for Log files
directory = os.path.dirname(gc_output_log_file_path)
if not os.path.exists(directory):
os.makedirs(directory)
verbose_gc = "-verbose:gc"
print_gc = "-XX:+PrintGCDetails"
# Perform Cleanup - Delete GC Log if it exists
deleteFile(gc_output_log_file_path)
scatter_output_file_path = config.scatter_output_parent + os.path.sep + config.scatter_output_file + "_" + config.scatter_sim_file + "_" + str(key) + "m" + "." + config.scatter_output_extension
print "-----------------------------------------------------------\n"
print "Running with Heap Size : "+ str(key)+"MB" + "\n"
# p = subprocess.Popen(["java", vm_argument, "-cp", config.classpath,"jitd.benchmark.ScriptDriverBenchmark",scatter_file_path, scatter_output_file_path])
p = subprocess.Popen(["java", heap_size, gc_log, verbose_gc, print_gc, "-cp", config.classpath,"jitd.benchmark.ScriptDriverBenchmark",scatter_file_path, scatter_output_file_path])
# Wait for the above process to complete.
# Removing this statement might cause following instructions to run before the previous command completes executions
p.wait()
# Delete all the generated data files
performCleanup()
print "Cleanup operations finished\n"
time.sleep(5)
print "\n"
# Analyzing the logs
print "Analyzing the GC Log for Heap Size : "+ str(key)+"MB" + "\n"
gc_time = 0
if not gc_benchmark_initialised:
gc_benchmark_file = open(gc_benchmark_file_path, "w")
gc_benchmark_file.write("Heap Size (in MB),Time spent in Garbage Collection(in seconds)\n")
gc_benchmark_initialised = True
with open(gc_output_log_file_path) as f:
for line in f:
# If line starts with decimal
if re.match("^\d+\.\d+",line):
# Find all decimals, we will need 1st in all decimals
decimals = re.findall("\d+\.\d+", line)
if len(decimals) > 1:
# print decimals[1]
gc_time = gc_time + float(decimals[1])
print "Total Time spent in Garbage Collection for Heap Size of " + str(key)+"MB :" + str(gc_time) + " seconds \n"
gc_benchmark_file.write(str(key)+","+str(gc_time)+"\n")
# Scatter plot for
# using invalid_raise = False, ignores any row with missing values without raising exception
# using dtaype = None, makes python calculate data types by itself
data = np.genfromtxt(scatter_output_file_path, delimiter=',', invalid_raise = False, dtype = None, names=['x','y','z'])
# Calculate the total runtime and put it in the list
total_runtime = sum(data['y'])
total_runtime_list.insert(idx, total_runtime)
idx += 1
use_color = {"WRITE":"red","READ":"blue"}
color_map = []
s_map = []
i = 0
for x in data['z']:
color_map.insert(i,use_color[x])
if(x == "WRITE"):
s_map.insert(i,10)
else:
s_map.insert(i,1)
i = i + 1
figure = plt.figure(figCount)
# Specify color maps for data points using color = color_map
plt.scatter(data['x'],data['y'], s=s_map, color=color_map)
plt.xlabel("Number of Iterations")
plt.yscale('log')
plt.ylabel("Time (in seconds)")
plt.title("System Performance in cracker mode with heap size "+str(key)+"MB")
plt.grid(True)
plt.plot()
plt.ylim([0.0000001,1000])
# Legend
classes = ['Write','Read']
class_colours = ['r','b']
recs = []
# Generate the legend for the graph
for i in range(0,len(class_colours)):
recs.append(mpatches.Rectangle((0,0),1,1,fc=class_colours[i]))
plt.legend(recs,classes)
figure.savefig(config.xy_output_parent+os.path.sep+'performance_'+str(key)+"m"+'.png')
figure.show()
figCount = figCount + 1
print "\nTotal runtime for Heap Size of "+str(key) + "MB" + " :" + str(total_runtime)
print "-----------------------------------------------------------\n"
if config.total_time_vs_gc_time:
figure = plt.figure(figCount)
ax = figure.add_subplot(111)
# Close the file
gc_benchmark_file.close()
# Time vs Heap Size Graph
data1 = total_runtime_list
data2 = np.genfromtxt(gc_benchmark_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
index = np.arange(len(data1))
width = 0.25
rects1 = ax.bar(index, data1, width, color = 'b')
rects2 = ax.bar(index + width, data2['y'], width, color = 'r')
ax.set_xlabel("Heap Size (in MB)")
ax.set_xticks(index + width)
ax.set_xticklabels(data2['x'])
ax.set_ylabel("Time Taken (in seconds)")
ax.set_title("Time taken in cracker mode on different Heap Sizes for Scatter Plots")
ax.legend((rects1[0], rects2[0]), ('Total Runtime', 'Garbage Collection'))
ax.grid(True)
figure.savefig(config.scatter_output_parent+os.path.sep+'gc-time-total-runtime-vs-heap-size-'+config.scatter_sim_file+'.png')
figure.show()
figCount = figCount + 1
# Following line will keep the graphs alive
print "Press Enter or Ctrl-C to exit"
raw_input() | {
"content_hash": "eff1a3ece72163c40a0d841e1f104a25",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 196,
"avg_line_length": 44.61194029850746,
"alnum_prop": 0.6744730679156908,
"repo_name": "rmp91/jitd",
"id": "f5cc51a30b976da7e68dbc81e780201d85fc1c94",
"size": "14960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "java/benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "65"
},
{
"name": "C",
"bytes": "58628"
},
{
"name": "C++",
"bytes": "75056"
},
{
"name": "CoffeeScript",
"bytes": "6639"
},
{
"name": "HTML",
"bytes": "8651"
},
{
"name": "Java",
"bytes": "242554"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Makefile",
"bytes": "3001"
},
{
"name": "OCaml",
"bytes": "51741"
},
{
"name": "Python",
"bytes": "19235"
},
{
"name": "Ruby",
"bytes": "14127"
},
{
"name": "Shell",
"bytes": "708"
}
],
"symlink_target": ""
} |
"""
Setup the execution environment based on the command line argument
"""
import logging
from os import environ as env
import config
class Environment(object):
    """Base class describing environment variables for an experiment run.

    Subclasses override the class-level dictionaries below; ``setup()``
    applies them to the process environment in increasing priority order.
    """

    # Set only when the variable is not already present.
    # priority: 0
    default_variables = {}
    # Appended to an existing value, or set when absent.
    # priority: 1
    updated_variables = {}
    # Overwritten unconditionally, regardless of any previous value.
    # priority: 2
    forced_variables = {}
    # Applied when experiments are run without re-building.
    # priority: 3
    only_run_variables = {}
    # Applied when experiments are built but not run.
    # priority: 3
    only_build_variables = {}
    # Applied in debug mode; these have the highest priority.
    # priority: 4
    debug_variables = {}

    def __init__(self, debug=False, verbose=False):
        self.debug = debug
        self.verbose = verbose
        # Truthy flags are exported as "1", falsy ones as an empty string.
        env["DEBUG"] = "1" if debug else ""
        env["VERBOSE"] = "1" if verbose else ""

    def setup(self, env_type='both'):
        """Apply this environment's variables to os.environ.

        env_type -- 'build' (build only), 'run' (run only) or 'both'.
        """
        logging.debug("Setting up the environment. Type: {} - {}".format(
            type(self).__name__, env_type))
        # Priority 0: keep whatever is already set.
        for name, val in self.default_variables.items():
            if not env.get(name):
                env[name] = val
        # Priority 1: append to the current value when one exists.
        for name, val in self.updated_variables.items():
            existing = env.get(name)
            if existing:
                env[name] = existing + val
            else:
                env[name] = val
        # Priority 2: overwrite regardless of the previous value.
        for name, val in self.forced_variables.items():
            env[name] = val if val else ""
        # Priority 3: build-only / run-only variables and mode flags.
        if env_type == 'build':
            env['EXP_NO_RUN'] = '1'
            env.update(self.only_build_variables)
        elif env_type == 'run':
            env['EXP_NO_BUILD'] = '1'
            env.update(self.only_run_variables)
        # Priority 4: debug settings override everything else.
        if self.debug:
            env.update(self.debug_variables)
def set_all_environments(debug=False, verbose=False, env_type='both'):
    """
    Convenience wrapper: instantiate and apply every environment class
    listed in ``config.Config().environments`` (defaults to none).
    """
    for env_class in getattr(config.Config(), "environments", []):
        env_class(debug, verbose).setup(env_type)
| {
"content_hash": "6889b17bd799cacbcfffb5c2f02034fe",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 108,
"avg_line_length": 29.341176470588234,
"alnum_prop": 0.5926222935044105,
"repo_name": "tudinfse/fex",
"id": "7ddd9f8debb15c2cc1a7fd64962bb0b387eedbef",
"size": "2494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "16692698"
},
{
"name": "C++",
"bytes": "4413677"
},
{
"name": "Dockerfile",
"bytes": "1518"
},
{
"name": "Makefile",
"bytes": "243186"
},
{
"name": "Objective-C",
"bytes": "34374"
},
{
"name": "PHP",
"bytes": "1309"
},
{
"name": "Python",
"bytes": "111232"
},
{
"name": "Shell",
"bytes": "23352"
}
],
"symlink_target": ""
} |
from .validateHub import jsonschemaErrorReport, validateHgncSymbol, symbolStatus
from .ontology_lookup import * | {
"content_hash": "34e025233bad0f0faa97354246727bd2",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 80,
"avg_line_length": 55.5,
"alnum_prop": 0.8648648648648649,
"repo_name": "IHEC/ihec-ecosystems",
"id": "f8db5061da0a174a8c6a18b09918e854fb89e4a9",
"size": "111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IHEC_Data_Hub/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "110141"
},
{
"name": "Shell",
"bytes": "2613"
}
],
"symlink_target": ""
} |
import unittest
import logging
try:
import mock # Python 2
except ImportError:
from unittest import mock # Python 3
from nose.tools import raises
from ryu.services.protocols.bgp import bgpspeaker
LOG = logging.getLogger(__name__)
class Test_BGPSpeaker(unittest.TestCase):
    """
    Test case for bgp.bgpspeaker.BGPSpeaker
    """
    # Every test patches BGPSpeaker.__init__ with a no-op MagicMock (so the
    # constructor arguments 65000/'10.0.0.1' are ignored) and patches the
    # module-level `call` so the internal API invocation can be inspected.

    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_mac_ip_adv(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MAC_IP_ADV_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        next_hop = '10.0.0.1'
        # For the MAC/IP advertisement route type, every argument is
        # expected to be forwarded unchanged.
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            'esi': esi,
            'ethernet_tag_id': ethernet_tag_id,
            'mac_addr': mac_addr,
            'ip_addr': ip_addr,
            'next_hop': next_hop,
        }

        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
            next_hop=next_hop,
        )

        # Check
        mock_call.assert_called_with(
            'evpn_prefix.add_local', **expected_kwargs)

    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_multicast_etag(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        next_hop = '10.0.0.1'
        # For the multicast ethernet-tag route type, esi and mac_addr are
        # not part of the route and must be dropped by evpn_prefix_add.
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            # 'esi': esi,  # should be ignored
            'ethernet_tag_id': ethernet_tag_id,
            # 'mac_addr': mac_addr,  # should be ignored
            'ip_addr': ip_addr,
            'next_hop': next_hop,
        }

        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
            next_hop=next_hop,
        )

        # Check
        mock_call.assert_called_with(
            'evpn_prefix.add_local', **expected_kwargs)

    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_multicast_etag_no_next_hop(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        next_hop = '0.0.0.0'  # the default value
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            # 'esi': esi,  # should be ignored
            'ethernet_tag_id': ethernet_tag_id,
            # 'mac_addr': mac_addr,  # should be ignored
            'ip_addr': ip_addr,
            'next_hop': next_hop,
        }

        # Test: next_hop is omitted, so the '0.0.0.0' default must be used.
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
            # next_hop=next_hop,  # omitted
        )

        # Check
        mock_call.assert_called_with(
            'evpn_prefix.add_local', **expected_kwargs)

    @raises(ValueError)
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_invalid_route_type(self, mock_call):
        # Prepare test data
        route_type = 'foobar'  # Invalid EVPN route type
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        next_hop = '10.0.0.1'

        # Test: @raises(ValueError) expects this call to raise.
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
            next_hop=next_hop,
        )

        # Check
        # NOTE(review): this assertion is unreachable -- the call above is
        # expected to raise ValueError (see @raises), so control never
        # reaches this point.
        mock_call.assert_called_with(
            'evpn_prefix.add_local', 'Invalid arguments detected')

    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_del_mac_ip_adv(self, mock_call):
        # Prepare test data (deletion takes no next_hop).
        route_type = bgpspeaker.EVPN_MAC_IP_ADV_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            'esi': esi,
            'ethernet_tag_id': ethernet_tag_id,
            'mac_addr': mac_addr,
            'ip_addr': ip_addr,
        }

        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_del(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
        )

        # Check
        mock_call.assert_called_with(
            'evpn_prefix.delete_local', **expected_kwargs)

    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_del_multicast_etag(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            # 'esi': esi,  # should be ignored
            'ethernet_tag_id': ethernet_tag_id,
            # 'mac_addr': mac_addr,  # should be ignored
            'ip_addr': ip_addr,
        }

        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_del(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
        )

        # Check
        mock_call.assert_called_with(
            'evpn_prefix.delete_local', **expected_kwargs)

    @raises(ValueError)
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_del_invalid_route_type(self, mock_call):
        # Prepare test data
        route_type = 'foobar'  # Invalid EVPN route type
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'

        # Test: @raises(ValueError) expects this call to raise.
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_del(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
        )

        # Check
        # NOTE(review): unreachable for the same reason as the add variant
        # above -- the call is expected to raise before this line.
        mock_call.assert_called_with(
            'evpn_prefix.delete_local', 'Invalid arguments detected')
| {
"content_hash": "b356f71094e3d8d8d36735a327264c3a",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 76,
"avg_line_length": 33.60919540229885,
"alnum_prop": 0.551641586867305,
"repo_name": "ool2016-seclab/quarantineSystem",
"id": "243ef4b340ffc914b727951b2056df8488a387c3",
"size": "9385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ryu/tests/unit/services/protocols/bgp/test_bgpspeaker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28540"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "874721"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "5650402"
},
{
"name": "Shell",
"bytes": "9213"
}
],
"symlink_target": ""
} |
"""
simple thread exaple
"""
import threading
def worker(num):
    """
    Thread worker function: print this worker's number.

    num -- identifier passed to the thread at creation time
    """
    print "Worker: %s" % num
    return
# Spawn five worker threads, passing each one its index.
# NOTE(review): the threads are never joined; the interpreter simply waits
# for non-daemon threads at exit, so output order is nondeterministic.
for i in xrange(5):
    t = threading.Thread(target=worker, args=(i,))
    t.start()
| {
"content_hash": "8128e8b18caf9a2f3e40bba6212762a2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 50,
"avg_line_length": 13.055555555555555,
"alnum_prop": 0.5829787234042553,
"repo_name": "bristy/HackYourself",
"id": "28a0ec83c9af3b0ac9671330526cba636c415bbd",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_programs/thread1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "453564"
},
{
"name": "HTML",
"bytes": "145732"
},
{
"name": "Java",
"bytes": "126255"
},
{
"name": "Makefile",
"bytes": "14281"
},
{
"name": "Python",
"bytes": "140873"
}
],
"symlink_target": ""
} |
from typing import Any
import django
import mock
from django.test import TestCase
from django.utils import translation
from django.conf import settings
from django.http import HttpResponse
from django.core import mail
from http.cookies import SimpleCookie
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.management.commands import makemessages
from zerver.lib.notifications import enqueue_welcome_emails
from django.utils.timezone import now as timezone_now
class EmailTranslationTestCase(ZulipTestCase):
    def test_email_translation(self) -> None:
        """Outgoing emails should be rendered in the recipient's language."""

        def check_translation(phrase: str, request_type: str, *args: Any, **kwargs: Any) -> None:
            # Trigger the email via the given client call, assert that the
            # translated phrase appears in the first queued message, then
            # drain the outbox so the next check starts from a clean state.
            if request_type == "post":
                self.client_post(*args, **kwargs)
            elif request_type == "patch":  # nocoverage: see comment below
                self.client_patch(*args, **kwargs)
            email_message = mail.outbox[0]
            self.assertIn(phrase, email_message.body)
            for i in range(len(mail.outbox)):
                mail.outbox.pop()

        # Make German the default language for both Hamlet and his realm.
        hamlet = self.example_user("hamlet")
        hamlet.default_language = "de"
        hamlet.save()
        realm = hamlet.realm
        realm.default_language = "de"
        realm.save()
        self.login(hamlet.email)

        # TODO: Uncomment and replace with translation once we have German translations for the strings
        # in confirm_new_email.txt.
        # Also remove the "nocoverage" from check_translation above.
        # check_translation("Viele Grüße", "patch", "/json/settings", {"email": "hamlets-new@zulip.com"})

        # The Accept-Language header should override the realm default here.
        check_translation("Incrível!", "post", "/accounts/home/", {"email": "new-email@zulip.com"}, HTTP_ACCEPT_LANGUAGE="pt")
        check_translation("Danke, dass Du", "post", '/accounts/find/', {'emails': hamlet.email})
        check_translation("Hallo", "post", "/json/invites", {"invitee_emails": "new-email@zulip.com", "stream": ["Denmark"]})

        with self.settings(DEVELOPMENT_LOG_EMAILS=True):
            enqueue_welcome_emails(hamlet)
        check_translation("Viele Grüße", "")
class TranslationTestCase(ZulipTestCase):
    """
    Translated strings should change with the locale, and URLs should be
    locale aware.
    """

    def tearDown(self) -> None:
        # Restore the default locale so later tests are unaffected.
        translation.activate(settings.LANGUAGE_CODE)

    def fetch(self, method: str, url: str, expected_status: int, **kwargs: Any) -> HttpResponse:
        """Issue a request via the named client method (e.g. "get", "post")
        and assert that the response carries the expected status code."""
        client_method = getattr(self.client, method)
        response = client_method(url, **kwargs)
        status = response.status_code
        self.assertEqual(status, expected_status,
                         msg="Expected %d, received %d for %s to %s" % (
                             expected_status, status, method, url))
        return response

    def test_accept_language_header(self) -> None:
        # The Accept-Language header alone should select the translation.
        for lang, word in [('en', u'Sign up'),
                           ('de', u'Registrieren'),
                           ('sr', u'Упишите се'),
                           ('zh-hans', u'注册')]:
            response = self.fetch('get', '/integrations/', 200,
                                  HTTP_ACCEPT_LANGUAGE=lang)
            self.assert_in_response(word, response)

    def test_cookie(self) -> None:
        # The language cookie alone should select the translation.
        for lang, word in [('en', u'Sign up'),
                           ('de', u'Registrieren'),
                           ('sr', u'Упишите се'),
                           ('zh-hans', u'注册')]:
            # Applying str function to LANGUAGE_COOKIE_NAME to convert unicode
            # into an ascii otherwise SimpleCookie will raise an exception
            self.client.cookies = SimpleCookie({str(settings.LANGUAGE_COOKIE_NAME): lang})  # type: ignore # https://github.com/python/typeshed/issues/1476
            response = self.fetch('get', '/integrations/', 200)
            self.assert_in_response(word, response)

    def test_i18n_urls(self) -> None:
        # A language prefix in the URL should select the translation.
        for lang, word in [('en', u'Sign up'),
                           ('de', u'Registrieren'),
                           ('sr', u'Упишите се'),
                           ('zh-hans', u'注册')]:
            response = self.fetch('get', '/{}/integrations/'.format(lang), 200)
            self.assert_in_response(word, response)
class JsonTranslationTestCase(ZulipTestCase):
    """JSON error responses should use the active locale's translations."""

    def tearDown(self) -> None:
        # Reset the active locale for subsequent tests.
        translation.activate(settings.LANGUAGE_CODE)

    @mock.patch('zerver.lib.request._')
    def test_json_error(self, mock_gettext: Any) -> None:
        # gettext is mocked to a recognizable template so that we can see
        # it being substituted into the error message.
        mock_gettext.return_value = "this arg is bad: '{var_name}' (translated to German)"
        self.login(self.example_email('hamlet'))
        result = self.client_post("/json/invites",
                                  HTTP_ACCEPT_LANGUAGE='de')
        self.assert_json_error_contains(
            result,
            u"this arg is bad: 'invitee_emails' (translated to German)",
            status_code=400)

    @mock.patch('zerver.views.auth._')
    def test_jsonable_error(self, mock_gettext: Any) -> None:
        translated = "Some other language"
        mock_gettext.return_value = translated
        self.login(self.example_email('hamlet'))
        result = self.client_get("/de/accounts/login/jwt/")
        self.assert_json_error_contains(result,
                                        translated,
                                        status_code=400)
class FrontendRegexTestCase(TestCase):
    """makemessages should extract strings from frontend translation syntax."""

    def test_regexes(self) -> None:
        # Each case pairs a frontend template/JS snippet with the single
        # translatable string that extract_strings should pull out of it.
        cases = [
            ('{{#tr context}}english text with __variable__{{/tr}}{{/tr}}',
             'english text with __variable__'),
            ('{{t "english text" }}, "extra"}}',
             'english text'),
            ("{{t 'english text' }}, 'extra'}}",
             'english text'),
            ('i18n.t("english text"), "extra",)',
             'english text'),
            ('i18n.t("english text", context), "extra",)',
             'english text'),
            ("i18n.t('english text'), 'extra',)",
             'english text'),
            ("i18n.t('english text', context), 'extra',)",
             'english text'),
        ]
        command = makemessages.Command()
        for input_text, expected in cases:
            extracted = command.extract_strings(input_text)
            self.assertEqual(len(extracted), 1)
            self.assertEqual(extracted[0], expected)
| {
"content_hash": "91ec11a70c8621ec09c5f6129eeb0eec",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 155,
"avg_line_length": 37.93103448275862,
"alnum_prop": 0.5675757575757576,
"repo_name": "dhcrzf/zulip",
"id": "d4cc4843c2abfb336569e46428ca3c2bd90e45e3",
"size": "6669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_i18n.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "436713"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "673974"
},
{
"name": "JavaScript",
"bytes": "2951950"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "72908"
},
{
"name": "Python",
"bytes": "6188005"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "118284"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
} |
import operator
import os
import zlib
from collections import MutableSequence, Iterable, Sequence, Sized
from itertools import chain
from numbers import Real, Integral
from functools import reduce
from warnings import warn
from threading import Lock
import numpy as np
import bottleneck as bn
from scipy import sparse as sp
import Orange.data # import for io.py
from Orange.data import (
_contingency, _valuecount,
Domain, Variable, Storage, StringVariable, Unknown, Value, Instance,
ContinuousVariable, DiscreteVariable, MISSING_VALUES
)
from Orange.data.util import SharedComputeValue
from Orange.statistics.util import bincount, countnans, contingency, stats as fast_stats
from Orange.util import flatten
__all__ = ["dataset_dirs", "get_sample_datasets_dir", "RowInstance", "Table"]
def get_sample_datasets_dir():
    """Return the absolute path of the bundled sample-dataset directory
    (the ``datasets`` directory beside this module's parent package)."""
    this_dir = os.path.dirname(__file__)
    return os.path.realpath(os.path.join(this_dir, '..', 'datasets'))
# Directories searched when loading a data set by name.
# NOTE(review): presumably '' means "the current directory"; the consumers
# of this list are outside this chunk -- confirm.
dataset_dirs = ['', get_sample_datasets_dir()]


"""Domain conversion cache used in Table.from_table. It is global so that
chaining of domain conversions also works with caching even with descendants
of Table."""
_conversion_cache = None
class RowInstance(Instance):
    """An Instance that is a live view onto one row of a Table.

    Reads operate on dense copies of the row; writes through
    ``__setitem__`` / ``set_class`` are propagated back into the owning
    table, including into the table's sparse matrices when applicable.
    """

    # When the table stores X / Y / metas sparsely, the original sparse row
    # is kept in sparse_* and the dense copy in _x / _y / _metas.
    sparse_x = None
    sparse_y = None
    sparse_metas = None
    _weight = None

    def __init__(self, table, row_index):
        """
        Construct a data instance representing the given row of the table.
        """
        self.table = table
        self._domain = table.domain
        self.row_index = row_index
        self.id = table.ids[row_index]
        self._x = table.X[row_index]
        if sp.issparse(self._x):
            self.sparse_x = self._x
            # Densify for element access; writes go through both copies.
            self._x = np.asarray(self._x.todense())[0]
        self._y = table._Y[row_index]
        if sp.issparse(self._y):
            self.sparse_y = self._y
            self._y = np.asarray(self._y.todense())[0]
        self._metas = table.metas[row_index]
        if sp.issparse(self._metas):
            self.sparse_metas = self._metas
            self._metas = np.asarray(self._metas.todense())[0]

    @property
    def weight(self):
        # An unweighted table behaves as if every instance had weight 1.
        if not self.table.has_weights():
            return 1
        return self.table.W[self.row_index]

    @weight.setter
    def weight(self, weight):
        # Setting a weight on an unweighted table first materializes W.
        if not self.table.has_weights():
            self.table.set_weights()
        self.table.W[self.row_index] = weight

    def set_class(self, value):
        """Set the (single) class value, writing through to the table."""
        self._check_single_class()
        if not isinstance(value, Real):
            value = self.table.domain.class_var.to_val(value)
        self._y[0] = value
        # NOTE(review): truth-testing a scipy sparse matrix (`if
        # self.sparse_y:`) raises ValueError for most shapes; an
        # `is not None` check may be intended -- confirm upstream.
        if self.sparse_y:
            self.table._Y[self.row_index, 0] = value

    def __setitem__(self, key, value):
        """Assign to an attribute/class column (key >= 0) or a meta column
        (key < 0), writing through to the table for sparse storage."""
        if not isinstance(key, Integral):
            key = self._domain.index(key)
        if isinstance(value, str):
            var = self._domain[key]
            value = var.to_val(value)
        if key >= 0:
            if not isinstance(value, Real):
                raise TypeError("Expected primitive value, got '%s'" %
                                type(value).__name__)
            if key < len(self._x):
                self._x[key] = value
                if self.sparse_x:
                    self.table.X[self.row_index, key] = value
            else:
                # Indices past the attributes address the class columns.
                self._y[key - len(self._x)] = value
                if self.sparse_y:
                    self.table._Y[self.row_index, key - len(self._x)] = value
        else:
            # Negative keys address meta columns (-1 is the first meta).
            self._metas[-1 - key] = value
            if self.sparse_metas:
                self.table.metas[self.row_index, -1 - key] = value

    def _str(self, limit):
        """Render the row as text; with `limit`, sparse parts are truncated
        to the first few stored entries followed by ", ...".
        """
        def sp_values(matrix, variables):
            if not sp.issparse(matrix):
                return Instance.str_values(matrix[row], variables, limit)
            # CSR layout: the row's stored entries live in
            # data/indices[begptr:endptr].
            begptr, endptr = matrix.indptr[row:row + 2]
            rendptr = endptr if not limit else min(endptr, begptr + 5)
            variables = [variables[var]
                         for var in matrix.indices[begptr:rendptr]]
            s = ", ".join(
                "{}={}".format(var.name, var.str_val(val))
                for var, val in zip(variables, matrix.data[begptr:rendptr]))
            if limit and rendptr != endptr:
                s += ", ..."
            return s

        table = self.table
        domain = table.domain
        row = self.row_index
        s = "[" + sp_values(table.X, domain.attributes)
        if domain.class_vars:
            s += " | " + sp_values(table._Y, domain.class_vars)
        s += "]"
        if self._domain.metas:
            s += " {" + sp_values(table.metas, domain.metas) + "}"
        return s

    def __str__(self):
        return self._str(False)

    def __repr__(self):
        # Truncated form for interactive display.
        return self._str(True)
class Columns:
    """Expose a domain's variables (including metas) as attributes.

    Each variable becomes an attribute named after the variable, with
    spaces replaced by underscores.
    """
    def __init__(self, domain):
        for variable in chain(domain, domain.metas):
            attr_name = variable.name.replace(" ", "_")
            setattr(self, attr_name, variable)
# noinspection PyPep8Naming
class Table(MutableSequence, Storage):
    # Absolute path of the file the table was loaded from; set by from_file.
    __file__ = None
    # Default table name; from_table/from_table_rows propagate the source's.
    name = "untitled"
    @property
    def columns(self):
        """
        A class whose attributes contain attribute descriptors for columns.
        For a table `table`, setting `c = table.columns` will allow accessing
        the table's variables with, for instance `c.gender`, `c.age` etc.
        Spaces are replaced with underscores.
        """
        return Columns(self.domain)
_next_instance_id = 0
_next_instance_lock = Lock()
@property
def Y(self):
if self._Y.shape[1] == 1:
return self._Y[:, 0]
return self._Y
@Y.setter
def Y(self, value):
if len(value.shape) == 1:
value = value[:, None]
self._Y = value
    def __new__(cls, *args, **kwargs):
        """Dispatching constructor.

        Depending on the first argument, delegates to `from_url`,
        `from_file`, `from_table`, `from_domain`, `from_list` or
        `from_numpy`; with no arguments, creates an empty, uninitialized
        instance.
        """
        if not args and not kwargs:
            return super().__new__(cls)
        if 'filename' in kwargs:
            # Keyword form Table(filename=...) behaves like Table(filename).
            args = [kwargs.pop('filename')]
        if not args:
            raise TypeError(
                "Table takes at least 1 positional argument (0 given))")
        if isinstance(args[0], str):
            if args[0].startswith('https://') or args[0].startswith('http://'):
                return cls.from_url(args[0], **kwargs)
            else:
                return cls.from_file(args[0], **kwargs)
        elif isinstance(args[0], Table):
            return cls.from_table(args[0].domain, args[0])
        elif isinstance(args[0], Domain):
            domain, args = args[0], args[1:]
            if not args:
                return cls.from_domain(domain, **kwargs)
            if isinstance(args[0], Table):
                return cls.from_table(domain, *args)
            elif isinstance(args[0], list):
                return cls.from_list(domain, *args)
        else:
            # No explicit domain: from_numpy infers one from the arrays.
            domain = None
        return cls.from_numpy(domain, *args, **kwargs)
def __init__(self, *args, **kwargs):
# So subclasses can expect to call super without breakage; noop
pass
@classmethod
def from_domain(cls, domain, n_rows=0, weights=False):
"""
Construct a new `Table` with the given number of rows for the given
domain. The optional vector of weights is initialized to 1's.
:param domain: domain for the `Table`
:type domain: Orange.data.Domain
:param n_rows: number of rows in the new table
:type n_rows: int
:param weights: indicates whether to construct a vector of weights
:type weights: bool
:return: a new table
:rtype: Orange.data.Table
"""
self = cls()
self.domain = domain
self.n_rows = n_rows
self.X = np.zeros((n_rows, len(domain.attributes)))
self.Y = np.zeros((n_rows, len(domain.class_vars)))
if weights:
self.W = np.ones(n_rows)
else:
self.W = np.empty((n_rows, 0))
self.metas = np.empty((n_rows, len(self.domain.metas)), object)
cls._init_ids(self)
self.attributes = {}
return self
    @classmethod
    def from_table(cls, domain, source, row_indices=...):
        """
        Create a new table from selected columns and/or rows of an existing
        one. The columns are chosen using a domain. The domain may also include
        variables that do not appear in the source table; they are computed
        from source variables if possible.
        The resulting data may be a view or a copy of the existing data.
        :param domain: the domain for the new table
        :type domain: Orange.data.Domain
        :param source: the source table
        :type source: Orange.data.Table
        :param row_indices: indices of the rows to include
        :type row_indices: a slice or a sequence
        :return: a new table
        :rtype: Orange.data.Table
        """
        global _conversion_cache
        def get_columns(row_indices, src_cols, n_rows, dtype=np.float64,
                        is_sparse=False):
            # Assemble one part (X, Y or metas) of the new table.  Entries of
            # src_cols: ints in [0, n_src_attrs) index source.X, ints >=
            # n_src_attrs index source._Y, negative ints index source.metas,
            # None yields Unknown, and non-integral entries are
            # compute-value variables evaluated on the source table.
            if not len(src_cols):
                if is_sparse:
                    return sp.csr_matrix((n_rows, 0), dtype=source.X.dtype)
                else:
                    return np.zeros((n_rows, 0), dtype=source.X.dtype)
            n_src_attrs = len(source.domain.attributes)
            # Fast paths: all requested columns come from one source array.
            if all(isinstance(x, Integral) and 0 <= x < n_src_attrs
                   for x in src_cols):
                return _subarray(source.X, row_indices, src_cols)
            if all(isinstance(x, Integral) and x < 0 for x in src_cols):
                arr = _subarray(source.metas, row_indices,
                                [-1 - x for x in src_cols])
                if arr.dtype != dtype:
                    return arr.astype(dtype)
                return arr
            if all(isinstance(x, Integral) and x >= n_src_attrs
                   for x in src_cols):
                return _subarray(source._Y, row_indices,
                                 [x - n_src_attrs for x in src_cols])
            # Mixed sources: fill the result column by column.
            if is_sparse:
                a = sp.dok_matrix((n_rows, len(src_cols)), dtype=dtype)
            else:
                a = np.empty((n_rows, len(src_cols)), dtype=dtype)
            def match_type(x):
                """ Assure that matrix and column are both dense or sparse. """
                if is_sparse == sp.issparse(x):
                    return x
                elif is_sparse:
                    x = np.asarray(x)
                    return sp.csc_matrix(x.reshape(-1, 1).astype(np.float))
                else:
                    return np.ravel(x.toarray())
            shared_cache = _conversion_cache
            for i, col in enumerate(src_cols):
                if col is None:
                    a[:, i] = Unknown
                elif not isinstance(col, Integral):
                    if isinstance(col, SharedComputeValue):
                        # Compute the shared part once per (transform, table).
                        if (id(col.compute_shared), id(source)) not in shared_cache:
                            shared_cache[id(col.compute_shared), id(source)] = col.compute_shared(source)
                        shared = shared_cache[id(col.compute_shared), id(source)]
                        if row_indices is not ...:
                            a[:, i] = match_type(
                                col(source, shared_data=shared)[row_indices])
                        else:
                            a[:, i] = match_type(
                                col(source, shared_data=shared))
                    else:
                        if row_indices is not ...:
                            a[:, i] = match_type(col(source)[row_indices])
                        else:
                            a[:, i] = match_type(col(source))
                elif col < 0:
                    a[:, i] = match_type(source.metas[row_indices, -1 - col])
                elif col < n_src_attrs:
                    a[:, i] = match_type(source.X[row_indices, col])
                else:
                    a[:, i] = match_type(
                        source._Y[row_indices, col - n_src_attrs])
            if is_sparse:
                a = a.tocsr()
            return a
        new_cache = _conversion_cache is None
        try:
            if new_cache:
                _conversion_cache = {}
            else:
                cached = _conversion_cache.get((id(domain), id(source)))
                if cached:
                    return cached
            if domain == source.domain:
                return cls.from_table_rows(source, row_indices)
            if isinstance(row_indices, slice):
                start, stop, stride = row_indices.indices(source.X.shape[0])
                # NOTE(review): floor division undercounts when the slice
                # length is not a multiple of stride (e.g. slice(0, 10, 3)
                # gives 3 here but selects 4 rows) -- confirm callers never
                # hit this.
                n_rows = (stop - start) // stride
                if n_rows < 0:
                    n_rows = 0
            elif row_indices is ...:
                n_rows = len(source)
            else:
                n_rows = len(row_indices)
            self = cls()
            self.domain = domain
            conversion = domain.get_conversion(source.domain)
            self.X = get_columns(row_indices, conversion.attributes, n_rows,
                                 is_sparse=sp.issparse(source.X))
            if self.X.ndim == 1:
                self.X = self.X.reshape(-1, len(self.domain.attributes))
            self.Y = get_columns(row_indices, conversion.class_vars, n_rows,
                                 is_sparse=sp.issparse(source.Y))
            dtype = np.float64
            if any(isinstance(var, StringVariable) for var in domain.metas):
                dtype = np.object
            self.metas = get_columns(row_indices, conversion.metas,
                                     n_rows, dtype,
                                     is_sparse=sp.issparse(source.metas))
            if self.metas.ndim == 1:
                self.metas = self.metas.reshape(-1, len(self.domain.metas))
            if source.has_weights():
                self.W = np.array(source.W[row_indices])
            else:
                self.W = np.empty((n_rows, 0))
            self.name = getattr(source, 'name', '')
            if hasattr(source, 'ids'):
                self.ids = np.array(source.ids[row_indices])
            else:
                cls._init_ids(self)
            self.attributes = getattr(source, 'attributes', {})
            _conversion_cache[(id(domain), id(source))] = self
            return self
        finally:
            # The outermost from_table call owns the cache; drop it on exit.
            if new_cache:
                _conversion_cache = None
@classmethod
def from_table_rows(cls, source, row_indices):
"""
Construct a new table by selecting rows from the source table.
:param source: an existing table
:type source: Orange.data.Table
:param row_indices: indices of the rows to include
:type row_indices: a slice or a sequence
:return: a new table
:rtype: Orange.data.Table
"""
self = cls()
self.domain = source.domain
self.X = source.X[row_indices]
if self.X.ndim == 1:
self.X = self.X.reshape(-1, len(self.domain.attributes))
self.Y = source._Y[row_indices]
self.metas = source.metas[row_indices]
if self.metas.ndim == 1:
self.metas = self.metas.reshape(-1, len(self.domain.metas))
self.W = source.W[row_indices]
self.name = getattr(source, 'name', '')
self.ids = np.array(source.ids[row_indices])
self.attributes = getattr(source, 'attributes', {})
return self
    @classmethod
    def from_numpy(cls, domain, X, Y=None, metas=None, W=None):
        """
        Construct a table from numpy arrays with the given domain. The number
        of variables in the domain must match the number of columns in the
        corresponding arrays. All arrays must have the same number of rows.
        Arrays may be of different numpy types, and may be dense or sparse.
        :param domain: the domain for the new table
        :type domain: Orange.data.Domain
        :param X: array with attribute values
        :type X: np.array
        :param Y: array with class values
        :type Y: np.array
        :param metas: array with meta attributes
        :type metas: np.array
        :param W: array with weights
        :type W: np.array
        :return: a new table
        :rtype: Orange.data.Table
        """
        X, Y, W = _check_arrays(X, Y, W, dtype='float64')
        metas, = _check_arrays(metas, dtype=object)
        if Y is not None and Y.ndim == 1:
            Y = Y.reshape(Y.shape[0], 1)
        if domain is None:
            domain = Domain.from_numpy(X, Y, metas)
        if Y is None:
            if sp.issparse(X):
                Y = np.empty((X.shape[0], 0), object)
            else:
                # Without an explicit Y, columns of dense X beyond the
                # domain's attributes are taken as class values.
                Y = X[:, len(domain.attributes):]
                X = X[:, :len(domain.attributes)]
        if metas is None:
            metas = np.empty((X.shape[0], 0), object)
        if W is None or W.size == 0:
            # (n, 0) marks an unweighted table.
            W = np.empty((X.shape[0], 0))
        else:
            W = W.reshape(W.size)
        # Validate that array widths match the domain.
        if X.shape[1] != len(domain.attributes):
            raise ValueError(
                "Invalid number of variable columns ({} != {})".format(
                    X.shape[1], len(domain.attributes))
            )
        if Y.shape[1] != len(domain.class_vars):
            raise ValueError(
                "Invalid number of class columns ({} != {})".format(
                    Y.shape[1], len(domain.class_vars))
            )
        if metas.shape[1] != len(domain.metas):
            raise ValueError(
                "Invalid number of meta attribute columns ({} != {})".format(
                    metas.shape[1], len(domain.metas))
            )
        if not X.shape[0] == Y.shape[0] == metas.shape[0] == W.shape[0]:
            raise ValueError(
                "Parts of data contain different numbers of rows.")
        self = cls()
        self.domain = domain
        self.X = X
        self.Y = Y
        self.metas = metas
        self.W = W
        self.n_rows = self.X.shape[0]
        cls._init_ids(self)
        self.attributes = {}
        return self
@classmethod
def from_list(cls, domain, rows, weights=None):
if weights is not None and len(rows) != len(weights):
raise ValueError("mismatching number of instances and weights")
self = cls.from_domain(domain, len(rows), weights is not None)
attrs, classes = domain.attributes, domain.class_vars
metas = domain.metas
nattrs, ncls = len(domain.attributes), len(domain.class_vars)
for i, row in enumerate(rows):
if isinstance(row, Instance):
row = row.list
for j, (var, val) in enumerate(zip(attrs, row)):
self.X[i, j] = var.to_val(val)
for j, (var, val) in enumerate(zip(classes, row[nattrs:])):
self._Y[i, j] = var.to_val(val)
for j, (var, val) in enumerate(zip(metas, row[nattrs + ncls:])):
self.metas[i, j] = var.to_val(val)
if weights is not None:
self.W = np.array(weights)
return self
@classmethod
def _init_ids(cls, obj):
with cls._next_instance_lock:
obj.ids = np.array(range(cls._next_instance_id, cls._next_instance_id + obj.X.shape[0]))
cls._next_instance_id += obj.X.shape[0]
@classmethod
def new_id(cls):
with cls._next_instance_lock:
id = cls._next_instance_id
cls._next_instance_id += 1
return id
def save(self, filename):
"""
Save a data table to a file. The path can be absolute or relative.
:param filename: File name
:type filename: str
"""
ext = os.path.splitext(filename)[1]
from Orange.data.io import FileFormat
writer = FileFormat.writers.get(ext)
if not writer:
desc = FileFormat.names.get(ext)
if desc:
raise IOError("Writing of {}s is not supported".
format(desc.lower()))
else:
raise IOError("Unknown file name extension.")
writer.write_file(filename, self)
    @classmethod
    def from_file(cls, filename):
        """
        Read a data table from a file. The path can be absolute or relative.
        :param filename: File name
        :type filename: str
        :return: a new data table
        :rtype: Orange.data.Table
        """
        from Orange.data.io import FileFormat
        absolute_filename = FileFormat.locate(filename, dataset_dirs)
        reader = FileFormat.get_reader(absolute_filename)
        data = reader.read()
        # Readers return plain table. Make sure to cast it to appropriate
        # (subclass) type
        if cls != data.__class__:
            data = cls(data)
        # no need to call _init_ids as functions from .io already
        # construct a table with .ids
        data.__file__ = absolute_filename
        return data
@classmethod
def from_url(cls, url):
from Orange.data.io import UrlReader
reader = UrlReader(url)
data = reader.read()
if cls != data.__class__:
data = cls(data)
return data
# Helper function for __setitem__ and insert:
# Set the row of table data matrices
# noinspection PyProtectedMember
def _set_row(self, example, row):
domain = self.domain
if isinstance(example, Instance):
if example.domain == domain:
if isinstance(example, RowInstance):
self.X[row] = example._x
self._Y[row] = example._y
else:
self.X[row] = example._x
self._Y[row] = example._y
self.metas[row] = example._metas
return
c = self.domain.get_conversion(example.domain)
self.X[row], self._Y[row], self.metas[row] = \
self.domain.convert(example)
try:
self.ids[row] = example.id
except:
with type(self)._next_instance_lock:
self.ids[row] = type(self)._next_instance_id
type(self)._next_instance_id += 1
else:
self.X[row] = [var.to_val(val)
for var, val in zip(domain.attributes, example)]
self._Y[row] = [var.to_val(val)
for var, val in
zip(domain.class_vars,
example[len(domain.attributes):])]
self.metas[row] = np.array([var.Unknown for var in domain.metas],
dtype=object)
def _check_all_dense(self):
return all(x in (Storage.DENSE, Storage.MISSING)
for x in (self.X_density(), self.Y_density(),
self.metas_density()))
    # A helper function for extend and insert
    # Resize X, Y, metas and W.
    def _resize_all(self, new_length):
        """Resize all data arrays to `new_length` rows, in place.
        On failure, arrays that were already resized are rolled back to the
        old length before the exception propagates.  Sparse tables cannot
        be resized.
        """
        old_length = self.X.shape[0]
        if old_length == new_length:
            return
        if not self._check_all_dense():
            raise ValueError("Tables with sparse data cannot be resized")
        try:
            self.X.resize(new_length, self.X.shape[1])
            self._Y.resize(new_length, self._Y.shape[1])
            self.metas.resize(new_length, self.metas.shape[1])
            # W is (n, 0) for unweighted tables and 1-d otherwise.
            if self.W.ndim == 2:
                self.W.resize((new_length, 0))
            else:
                self.W.resize(new_length)
            self.ids.resize(new_length)
        except Exception:
            # Roll back any array that already reached new_length.
            if self.X.shape[0] == new_length:
                self.X.resize(old_length, self.X.shape[1])
            if self._Y.shape[0] == new_length:
                self._Y.resize(old_length, self._Y.shape[1])
            if self.metas.shape[0] == new_length:
                self.metas.resize(old_length, self.metas.shape[1])
            if self.W.shape[0] == new_length:
                if self.W.ndim == 2:
                    self.W.resize((old_length, 0))
                else:
                    self.W.resize(old_length)
            if self.ids.shape[0] == new_length:
                self.ids.resize(old_length)
            raise
    def __getitem__(self, key):
        """Index the table.
        - int -> a RowInstance view of that row
        - other 1-d key -> a new table with the selected rows
        - (int, column) -> a single Value
        - (rows, columns) -> a new table over the selection
        """
        if isinstance(key, Integral):
            return RowInstance(self, key)
        if not isinstance(key, tuple):
            return self.from_table_rows(self, key)
        if len(key) != 2:
            raise IndexError("Table indices must be one- or two-dimensional")
        row_idx, col_idx = key
        if isinstance(row_idx, Integral):
            if isinstance(col_idx, (str, Integral, Variable)):
                col_idx = self.domain.index(col_idx)
                var = self.domain[col_idx]
                if 0 <= col_idx < len(self.domain.attributes):
                    return Value(var, self.X[row_idx, col_idx])
                elif col_idx >= len(self.domain.attributes):
                    return Value(
                        var,
                        self._Y[row_idx,
                                col_idx - len(self.domain.attributes)])
                elif col_idx < 0:
                    return Value(var, self.metas[row_idx, -1 - col_idx])
            else:
                # Single row with multiple columns: build a one-row table.
                row_idx = [row_idx]
        # multiple rows OR single row but multiple columns:
        # construct a new table
        attributes, col_indices = self.domain._compute_col_indices(col_idx)
        if attributes is not None:
            # Split the selected variables back into attributes / classes /
            # metas to build the reduced domain.
            n_attrs = len(self.domain.attributes)
            r_attrs = [attributes[i]
                       for i, col in enumerate(col_indices)
                       if 0 <= col < n_attrs]
            r_classes = [attributes[i]
                         for i, col in enumerate(col_indices)
                         if col >= n_attrs]
            r_metas = [attributes[i]
                       for i, col in enumerate(col_indices) if col < 0]
            domain = Domain(r_attrs, r_classes, r_metas)
        else:
            domain = self.domain
        return self.from_table(domain, self, row_idx)
def __setitem__(self, key, value):
if not self._check_all_dense():
raise ValueError(
"Assignment to rows of sparse data is not supported")
if not isinstance(key, tuple):
if isinstance(value, Real):
self.X[key, :] = value
return
self._set_row(value, key)
return
if len(key) != 2:
raise IndexError("Table indices must be one- or two-dimensional")
row_idx, col_idx = key
# single row
if isinstance(row_idx, Integral):
if isinstance(col_idx, slice):
col_idx = range(*slice.indices(col_idx, self.X.shape[1]))
if not isinstance(col_idx, str) and isinstance(col_idx, Iterable):
col_idx = list(col_idx)
if not isinstance(col_idx, str) and isinstance(col_idx, Sized):
if isinstance(value, (Sequence, np.ndarray)):
values = value
elif isinstance(value, Iterable):
values = list(value)
else:
raise TypeError("Setting multiple values requires a "
"sequence or numpy array")
if len(values) != len(col_idx):
raise ValueError("Invalid number of values")
else:
col_idx, values = [col_idx], [value]
for value, col_idx in zip(values, col_idx):
if not isinstance(value, Integral):
value = self.domain[col_idx].to_val(value)
if not isinstance(col_idx, Integral):
col_idx = self.domain.index(col_idx)
if col_idx >= 0:
if col_idx < self.X.shape[1]:
self.X[row_idx, col_idx] = value
else:
self._Y[row_idx, col_idx - self.X.shape[1]] = value
else:
self.metas[row_idx, -1 - col_idx] = value
# multiple rows, multiple columns
attributes, col_indices = self.domain._compute_col_indices(col_idx)
if col_indices is ...:
col_indices = range(len(self.domain))
n_attrs = self.X.shape[1]
if isinstance(value, str):
if not attributes:
attributes = self.domain.attributes
for var, col in zip(attributes, col_indices):
if 0 <= col < n_attrs:
self.X[row_idx, col] = var.to_val(value)
elif col >= n_attrs:
self._Y[row_idx, col - n_attrs] = var.to_val(value)
else:
self.metas[row_idx, -1 - col] = var.to_val(value)
else:
attr_cols = np.fromiter(
(col for col in col_indices if 0 <= col < n_attrs), int)
class_cols = np.fromiter(
(col - n_attrs for col in col_indices if col >= n_attrs), int)
meta_cols = np.fromiter(
(-1 - col for col in col_indices if col < 0), int)
if value is None:
value = Unknown
if not isinstance(value, Real) and \
(len(attr_cols) or len(class_cols)):
raise TypeError(
"Ordinary attributes can only have primitive values")
if len(attr_cols):
if len(attr_cols) == 1:
# scipy.sparse matrices only allow primitive indices.
attr_cols = attr_cols[0]
self.X[row_idx, attr_cols] = value
if len(class_cols):
if len(class_cols) == 1:
# scipy.sparse matrices only allow primitive indices.
class_cols = class_cols[0]
self._Y[row_idx, class_cols] = value
if len(meta_cols):
self.metas[row_idx, meta_cols] = value
def __delitem__(self, key):
if not self._check_all_dense():
raise ValueError("Rows of sparse data cannot be deleted")
if key is ...:
key = range(len(self))
self.X = np.delete(self.X, key, axis=0)
self.Y = np.delete(self._Y, key, axis=0)
self.metas = np.delete(self.metas, key, axis=0)
self.W = np.delete(self.W, key, axis=0)
def __len__(self):
return self.X.shape[0]
def __str__(self):
return "[" + ",\n ".join(str(ex) for ex in self)
def __repr__(self):
s = "[" + ",\n ".join(repr(ex) for ex in self[:5])
if len(self) > 5:
s += ",\n ..."
s += "\n]"
return s
def clear(self):
"""Remove all rows from the table."""
if not self._check_all_dense():
raise ValueError("Tables with sparse data cannot be cleared")
del self[...]
def append(self, instance):
"""
Append a data instance to the table.
:param instance: a data instance
:type instance: Orange.data.Instance or a sequence of values
"""
self.insert(len(self), instance)
    def insert(self, row, instance):
        """
        Insert a data instance into the table.
        :param row: row index
        :type row: int
        :param instance: a data instance
        :type instance: Orange.data.Instance or a sequence of values
        """
        if row < 0:
            row += len(self)
        if row < 0 or row > len(self):
            raise IndexError("Index out of range")
        self.ensure_copy()  # ensure that numpy arrays are single-segment for resize
        self._resize_all(len(self) + 1)
        if row < len(self):
            # Shift rows at and below `row` down by one to make room.
            self.X[row + 1:] = self.X[row:-1]
            self._Y[row + 1:] = self._Y[row:-1]
            self.metas[row + 1:] = self.metas[row:-1]
            self.W[row + 1:] = self.W[row:-1]
            self.ids[row + 1:] = self.ids[row:-1]
        try:
            self._set_row(instance, row)
            if self.W.shape[-1]:
                self.W[row] = 1
        except Exception:
            # Roll back: shift the rows back up and shrink the arrays.
            self.X[row:-1] = self.X[row + 1:]
            self._Y[row:-1] = self._Y[row + 1:]
            self.metas[row:-1] = self.metas[row + 1:]
            self.W[row:-1] = self.W[row + 1:]
            self.ids[row:-1] = self.ids[row + 1:]
            self._resize_all(len(self) - 1)
            raise
    def extend(self, instances):
        """
        Extend the table with the given instances. The instances can be given
        as a table of the same or a different domain, or a sequence. In the
        latter case, each instances can be given as
        :obj:`~Orange.data.Instance` or a sequence of values (e.g. list,
        tuple, numpy.array).
        :param instances: additional instances
        :type instances: Orange.data.Table or a sequence of instances
        """
        old_length = len(self)
        self._resize_all(old_length + len(instances))
        try:
            # shortcut
            if isinstance(instances, Table) and instances.domain == self.domain:
                # Same domain: bulk-copy the arrays.
                self.X[old_length:] = instances.X
                self._Y[old_length:] = instances._Y
                self.metas[old_length:] = instances.metas
                if self.W.shape[-1]:
                    if instances.W.shape[-1]:
                        self.W[old_length:] = instances.W
                    else:
                        self.W[old_length:] = 1
                self.ids[old_length:] = instances.ids
            else:
                # Generic path: convert instance by instance via __setitem__.
                for i, example in enumerate(instances):
                    self[old_length + i] = example
                    try:
                        self.ids[old_length + i] = example.id
                    except AttributeError:
                        self.ids[old_length + i] = self.new_id()
        except Exception:
            # Failed midway: shrink back to the original length.
            self._resize_all(old_length)
            raise
    @staticmethod
    def concatenate(tables, axis=1):
        """Return concatenation of `tables` by `axis`."""
        if not tables:
            raise ValueError('need at least one table to concatenate')
        if 1 == len(tables):
            return tables[0].copy()
        CONCAT_ROWS, CONCAT_COLS = 0, 1
        if axis == CONCAT_ROWS:
            # Row-wise: extend a copy of the first table with the rest.
            table = tables[0].copy()
            for t in tables[1:]:
                table.extend(t)
            return table
        elif axis == CONCAT_COLS:
            # NOTE(review): reduce with `iand` yields the names common to ALL
            # tables, so a name shared by only two of three tables slips
            # through this check -- confirm whether that is intended.
            if reduce(operator.iand,
                      (set(map(operator.attrgetter('name'),
                               chain(t.domain.variables, t.domain.metas)))
                       for t in tables)):
                raise ValueError('Concatenating two domains with variables '
                                 'with same name is undefined')
            domain = Domain(flatten(t.domain.attributes for t in tables),
                            flatten(t.domain.class_vars for t in tables),
                            flatten(t.domain.metas for t in tables))
            def ndmin(A):
                # Promote 1-d arrays to column matrices so hstack lines up.
                return A if A.ndim > 1 else A.reshape(A.shape[0], 1)
            table = Table.from_numpy(domain,
                                     np.hstack(tuple(ndmin(t.X) for t in tables)),
                                     np.hstack(tuple(ndmin(t.Y) for t in tables)),
                                     np.hstack(tuple(ndmin(t.metas) for t in tables)),
                                     np.hstack(tuple(ndmin(t.W) for t in tables)))
            return table
        raise ValueError('axis {} out of bounds [0, 2)'.format(axis))
def is_view(self):
"""
Return `True` if all arrays represent a view referring to another table
"""
return ((not self.X.shape[-1] or self.X.base is not None) and
(not self._Y.shape[-1] or self._Y.base is not None) and
(not self.metas.shape[-1] or self.metas.base is not None) and
(not self._weights.shape[-1] or self.W.base is not None))
def is_copy(self):
"""
Return `True` if the table owns its data
"""
return ((not self.X.shape[-1] or self.X.base is None) and
(self._Y.base is None) and
(self.metas.base is None) and
(self.W.base is None))
def is_sparse(self):
"""
Return `True` if the table stores data in sparse format
"""
return any(sp.issparse(i) for i in [self.X, self.Y, self.metas])
def ensure_copy(self):
"""
Ensure that the table owns its data; copy arrays when necessary.
"""
def is_view(x):
# Sparse matrices don't have views like numpy arrays. Since indexing on
# them creates copies in constructor we can skip this check here.
return not sp.issparse(x) and x.base is not None
if is_view(self.X):
self.X = self.X.copy()
if is_view(self._Y):
self._Y = self._Y.copy()
if is_view(self.metas):
self.metas = self.metas.copy()
if is_view(self.W):
self.W = self.W.copy()
def copy(self):
"""
Return a copy of the table
"""
t = self.__class__(self)
t.ensure_copy()
return t
@staticmethod
def __determine_density(data):
if data is None:
return Storage.Missing
if data is not None and sp.issparse(data):
return Storage.SPARSE_BOOL if (data.data == 1).all() else Storage.SPARSE
else:
return Storage.DENSE
def X_density(self):
if not hasattr(self, "_X_density"):
self._X_density = self.__determine_density(self.X)
return self._X_density
def Y_density(self):
if not hasattr(self, "_Y_density"):
self._Y_density = self.__determine_density(self._Y)
return self._Y_density
def metas_density(self):
if not hasattr(self, "_metas_density"):
self._metas_density = self.__determine_density(self.metas)
return self._metas_density
def set_weights(self, weight=1):
"""
Set weights of data instances; create a vector of weights if necessary.
"""
if not self.W.shape[-1]:
self.W = np.empty(len(self))
self.W[:] = weight
def has_weights(self):
"""Return `True` if the data instances are weighed. """
return self.W.shape[-1] != 0
def total_weight(self):
"""
Return the total weight of instances in the table, or their number if
they are unweighted.
"""
if self.W.shape[-1]:
return sum(self.W)
return len(self)
def has_missing(self):
"""Return `True` if there are any missing attribute or class values."""
missing_x = not sp.issparse(self.X) and bn.anynan(self.X) # do not check for sparse X
return missing_x or bn.anynan(self._Y)
    def has_missing_class(self):
        """Return `True` if there are any missing class values."""
        # Missing values are stored as NaN in _Y.
        return bn.anynan(self._Y)
def checksum(self, include_metas=True):
# TODO: zlib.adler32 does not work for numpy arrays with dtype object
# (after pickling and unpickling such arrays, checksum changes)
# Why, and should we fix it or remove it?
"""Return a checksum over X, Y, metas and W."""
cs = zlib.adler32(np.ascontiguousarray(self.X))
cs = zlib.adler32(np.ascontiguousarray(self._Y), cs)
if include_metas:
cs = zlib.adler32(np.ascontiguousarray(self.metas), cs)
cs = zlib.adler32(np.ascontiguousarray(self.W), cs)
return cs
def shuffle(self):
"""Randomly shuffle the rows of the table."""
if not self._check_all_dense():
raise ValueError("Rows of sparse data cannot be shuffled")
ind = np.arange(self.X.shape[0])
np.random.shuffle(ind)
self.X = self.X[ind]
self._Y = self._Y[ind]
self.metas = self.metas[ind]
self.W = self.W[ind]
    def get_column_view(self, index):
        """
        Return a vector - as a view, not a copy - with a column of the table,
        and a bool flag telling whether this column is sparse. Note that
        vertical slicing of sparse matrices is inefficient.

        :param index: the index of the column
        :type index: int, str or Orange.data.Variable
        :return: (one-dimensional numpy array, sparse)
        """
        def rx(M):
            # Sparse columns are densified before being returned (a copy,
            # despite the docstring's view promise for the dense case).
            if sp.issparse(M):
                return np.asarray(M.todense())[:, 0], True
            else:
                return M, False
        if not isinstance(index, Integral):
            index = self.domain.index(index)
        if index >= 0:
            if index < self.X.shape[1]:
                return rx(self.X[:, index])
            else:
                return rx(self._Y[:, index - self.X.shape[1]])
        else:
            # Negative indices address metas: -1 maps to metas[:, 0].
            return rx(self.metas[:, -1 - index])
def _filter_is_defined(self, columns=None, negate=False):
if columns is None:
if sp.issparse(self.X):
remove = (self.X.indptr[1:] !=
self.X.indptr[-1:] + self.X.shape[1])
else:
remove = bn.anynan(self.X, axis=1)
if sp.issparse(self._Y):
remove = np.logical_or(remove, self._Y.indptr[1:] !=
self._Y.indptr[-1:] + self._Y.shape[1])
else:
remove = np.logical_or(remove, bn.anynan(self._Y, axis=1))
else:
remove = np.zeros(len(self), dtype=bool)
for column in columns:
col, sparse = self.get_column_view(column)
if sparse:
remove = np.logical_or(remove, col == 0)
else:
remove = np.logical_or(remove, bn.anynan([col], axis=0))
retain = remove if negate else np.logical_not(remove)
return self.from_table_rows(self, retain)
def _filter_has_class(self, negate=False):
if sp.issparse(self._Y):
if negate:
retain = (self._Y.indptr[1:] !=
self._Y.indptr[-1:] + self._Y.shape[1])
else:
retain = (self._Y.indptr[1:] ==
self._Y.indptr[-1:] + self._Y.shape[1])
else:
retain = bn.anynan(self._Y, axis=1)
if not negate:
retain = np.logical_not(retain)
return self.from_table_rows(self, retain)
def _filter_same_value(self, column, value, negate=False):
if not isinstance(value, Real):
value = self.domain[column].to_val(value)
sel = self.get_column_view(column)[0] == value
if negate:
sel = np.logical_not(sel)
return self.from_table_rows(self, sel)
    def _filter_values_indicators(self, filter):
        """Return a boolean row mask for `filter`.

        A `Values` filter is evaluated as a conjunction or disjunction of
        its conditions (recursively for nested `Values`); any other filter
        is treated as a single condition.
        """
        from Orange.data import filter as data_filter
        if isinstance(filter, data_filter.Values):
            conditions = filter.conditions
            conjunction = filter.conjunction
        else:
            conditions = [filter]
            conjunction = True
        # Identity element: all-True for AND, all-False for OR.
        if conjunction:
            sel = np.ones(len(self), dtype=bool)
        else:
            sel = np.zeros(len(self), dtype=bool)
        for f in conditions:
            if isinstance(f, data_filter.Values):
                # Nested Values: combine its mask recursively.
                if conjunction:
                    sel *= self._filter_values_indicators(f)
                else:
                    sel += self._filter_values_indicators(f)
                continue
            col = self.get_column_view(f.column)[0]
            if isinstance(f, data_filter.FilterDiscrete) and f.values is None \
                    or isinstance(f, data_filter.FilterContinuous) and \
                    f.oper == f.IsDefined:
                # "Is defined" test: non-NaN entries.
                col = col.astype(float)
                if conjunction:
                    sel *= ~np.isnan(col)
                else:
                    sel += ~np.isnan(col)
            elif isinstance(f, data_filter.FilterString) and \
                    f.oper == f.IsDefined:
                if conjunction:
                    sel *= col.astype(bool)
                else:
                    sel += col.astype(bool)
            elif isinstance(f, data_filter.FilterDiscrete):
                # Match any of the listed discrete values.
                if conjunction:
                    s2 = np.zeros(len(self), dtype=bool)
                    for val in f.values:
                        if not isinstance(val, Real):
                            val = self.domain[f.column].to_val(val)
                        s2 += (col == val)
                    sel *= s2
                else:
                    for val in f.values:
                        if not isinstance(val, Real):
                            val = self.domain[f.column].to_val(val)
                        sel += (col == val)
            elif isinstance(f, data_filter.FilterStringList):
                if not f.case_sensitive:
                    # noinspection PyTypeChecker
                    col = np.char.lower(np.array(col, dtype=str))
                    vals = [val.lower() for val in f.values]
                else:
                    vals = f.values
                if conjunction:
                    sel *= reduce(operator.add,
                                  (col == val for val in vals))
                else:
                    sel = reduce(operator.add,
                                 (col == val for val in vals), sel)
            elif isinstance(f, data_filter.FilterRegex):
                # NOTE(review): this overwrites the accumulated mask instead
                # of combining with it -- confirm whether FilterRegex is
                # meant to ignore conjunction/disjunction.
                sel = np.vectorize(f)(col)
            elif isinstance(f, (data_filter.FilterContinuous,
                                data_filter.FilterString)):
                if (isinstance(f, data_filter.FilterString) and
                        not f.case_sensitive):
                    # noinspection PyTypeChecker
                    col = np.char.lower(np.array(col, dtype=str))
                    fmin = f.min.lower()
                    if f.oper in [f.Between, f.Outside]:
                        fmax = f.max.lower()
                else:
                    fmin, fmax = f.min, f.max
                if f.oper == f.Equal:
                    col = (col == fmin)
                elif f.oper == f.NotEqual:
                    col = (col != fmin)
                elif f.oper == f.Less:
                    col = (col < fmin)
                elif f.oper == f.LessEqual:
                    col = (col <= fmin)
                elif f.oper == f.Greater:
                    col = (col > fmin)
                elif f.oper == f.GreaterEqual:
                    col = (col >= fmin)
                elif f.oper == f.Between:
                    col = (col >= fmin) * (col <= fmax)
                elif f.oper == f.Outside:
                    col = (col < fmin) + (col > fmax)
                elif not isinstance(f, data_filter.FilterString):
                    raise TypeError("Invalid operator")
                elif f.oper == f.Contains:
                    col = np.fromiter((fmin in e for e in col),
                                      dtype=bool)
                elif f.oper == f.StartsWith:
                    col = np.fromiter((e.startswith(fmin) for e in col),
                                      dtype=bool)
                elif f.oper == f.EndsWith:
                    col = np.fromiter((e.endswith(fmin) for e in col),
                                      dtype=bool)
                else:
                    raise TypeError("Invalid operator")
                if conjunction:
                    sel *= col
                else:
                    sel += col
            else:
                raise TypeError("Invalid filter")
        if filter.negate:
            sel = ~sel
        return sel
def _filter_values(self, filter):
sel = self._filter_values_indicators(filter)
return self.from_table(self.domain, self, sel)
    def _compute_basic_stats(self, columns=None,
                             include_metas=False, compute_variance=False):
        """Return per-column summary statistics computed by `fast_stats`.
        With `columns=None`, rows for X, then _Y (and optionally metas) are
        stacked; otherwise one row per entry of `columns`, in order.
        """
        if compute_variance:
            raise NotImplementedError("computation of variance is "
                                      "not implemented yet")
        W = self.W if self.has_weights() else None
        rr = []
        stats = []
        if not columns:
            if self.domain.attributes:
                rr.append(fast_stats(self.X, W))
            if self.domain.class_vars:
                rr.append(fast_stats(self._Y, W))
            if include_metas and self.domain.metas:
                rr.append(fast_stats(self.metas, W))
            if len(rr):
                stats = np.vstack(tuple(rr))
        else:
            columns = [self.domain.index(c) for c in columns]
            nattrs = len(self.domain.attributes)
            # Compute stats only for the parts that are actually referenced;
            # `and` leaves the unused ones as False.
            Xs = any(0 <= c < nattrs for c in columns) and fast_stats(self.X, W)
            Ys = any(c >= nattrs for c in columns) and fast_stats(self._Y, W)
            ms = any(c < 0 for c in columns) and fast_stats(self.metas, W)
            for column in columns:
                if 0 <= column < nattrs:
                    stats.append(Xs[column, :])
                elif column >= nattrs:
                    stats.append(Ys[column - nattrs, :])
                else:
                    stats.append(ms[-1 - column])
        return stats
def _compute_distributions(self, columns=None):
    # Compute value distributions and unknown counts for the given columns
    # (all domain variables when `columns` is None). Returns a list of
    # (distribution, unknowns) pairs, one per column.

    def _get_matrix(M, cachedM, col):
        # Return (column data, weights, cache). For sparse M a CSC copy is
        # built once and reused through `cachedM`.
        nonlocal single_column
        W = self.W if self.has_weights() else None
        if not sp.issparse(M):
            return M[:, col], W, None
        if cachedM is None:
            if single_column:
                warn("computing distributions on sparse data "
                     "for a single column is inefficient")
            # NOTE(review): the cache is always built from self.X even though
            # this helper is also called with self.metas / self._Y, and the
            # X and metas call sites below share the same `Xcsc` cache --
            # this looks wrong when metas are sparse; confirm.
            cachedM = sp.csc_matrix(self.X)
        return cachedM[:, col], W, cachedM

    if columns is None:
        columns = range(len(self.domain.variables))
        single_column = False
    else:
        columns = [self.domain.index(var) for var in columns]
        single_column = len(columns) == 1 and len(self.domain) > 1
    distributions = []
    Xcsc = Ycsc = None
    for col in columns:
        var = self.domain[col]
        # Pick the backing array: X for attributes, metas for negative
        # indices, _Y for class variables.
        if 0 <= col < self.X.shape[1]:
            m, W, Xcsc = _get_matrix(self.X, Xcsc, col)
        elif col < 0:
            m, W, Xcsc = _get_matrix(self.metas, Xcsc, col * (-1) - 1)
        else:
            m, W, Ycsc = _get_matrix(self._Y, Ycsc, col - self.X.shape[1])
        if var.is_discrete:
            if W is not None:
                W = W.ravel()
            dist, unknowns = bincount(m, len(var.values) - 1, W)
        elif not m.shape[0]:
            # No instances: empty (value, weight) distribution.
            dist, unknowns = np.zeros((2, 0)), 0
        else:
            if W is not None:
                unknowns = countnans(m, W)
                if sp.issparse(m):
                    arg_sort = np.argsort(m.data)
                    ranks = m.indices[arg_sort]
                    vals = np.vstack((m.data[arg_sort], W[ranks].flatten()))
                else:
                    ranks = np.argsort(m)
                    vals = np.vstack((m[ranks], W[ranks].flatten()))
            else:
                unknowns = countnans(m.astype(float))
                if sp.issparse(m):
                    m = m.data
                # Unweighted: every value carries weight 1.
                vals = np.ones((2, m.shape[0]))
                vals[0, :] = m
                vals[0, :].sort()
            dist = np.array(_valuecount.valuecount(vals))
        distributions.append((dist, unknowns))
    return distributions
def _compute_contingency(self, col_vars=None, row_var=None):
    """Compute contingency matrices of `col_vars` against `row_var`.

    Args:
        col_vars: variables (or indices) for the columns; defaults to all
            variables in the domain.
        row_var: a discrete variable (or index) for the rows; defaults to
            the domain's class variable.

    Returns:
        A pair `(contingencies, unknown_rows)`: `contingencies` holds one
        entry per column variable -- `(cont, unknowns)` for discrete
        columns, `([U, C], unknowns)` for continuous ones -- and
        `unknown_rows` is the (weighted) count of rows whose row-variable
        value is missing.

    Raises:
        ValueError: if there is no row variable or a column variable is
            neither discrete nor continuous.
        TypeError: if the row variable is not discrete.
    """
    n_atts = self.X.shape[1]
    if col_vars is None:
        col_vars = range(len(self.domain.variables))
    else:
        col_vars = [self.domain.index(var) for var in col_vars]
    if row_var is None:
        row_var = self.domain.class_var
        if row_var is None:
            raise ValueError("No row variable")
    row_desc = self.domain[row_var]
    if not row_desc.is_discrete:
        raise TypeError("Row variable must be discrete")
    row_indi = self.domain.index(row_var)
    n_rows = len(row_desc.values)
    # Fetch the row variable's column from whichever array holds it:
    # attributes (X), metas (negative indices) or class variables (_Y).
    if 0 <= row_indi < n_atts:
        row_data = self.X[:, row_indi]
    elif row_indi < 0:
        row_data = self.metas[:, -1 - row_indi]
    else:
        row_data = self._Y[:, row_indi - n_atts]
    W = self.W if self.has_weights() else None
    nan_inds = None
    col_desc = [self.domain[var] for var in col_vars]
    col_indi = [self.domain.index(var) for var in col_vars]
    if any(not (var.is_discrete or var.is_continuous)
           for var in col_desc):
        raise ValueError("contingency can be computed only for discrete "
                         "and continuous values")
    if row_data.dtype.kind != "f":  # meta attributes can be stored as type object
        row_data = row_data.astype(float)
    unknown_rows = countnans(row_data)
    if unknown_rows:
        nan_inds = np.isnan(row_data)
        row_data = row_data[~nan_inds]
        # Fixed two defects: `if W:` raised ValueError for ndarray weights
        # (ambiguous truth value), and the weighted unknown count must be
        # taken from the *unfiltered* weights before masking them --
        # indexing the already-filtered W with the full-length mask was
        # wrong. `W is not None` matches the checks used further below.
        if W is not None:
            unknown_rows = np.sum(W[nan_inds])
            W = W[~nan_inds]
    contingencies = [None] * len(col_desc)
    for arr, f_cond, f_ind in (
            (self.X, lambda i: 0 <= i < n_atts, lambda i: i),
            (self._Y, lambda i: i >= n_atts, lambda i: i - n_atts),
            (self.metas, lambda i: i < 0, lambda i: -1 - i)):
        if nan_inds is not None:
            arr = arr[~nan_inds]
        # Positions (within col_vars) of the variables stored in `arr`.
        arr_indi = [e for e, ind in enumerate(col_indi) if f_cond(ind)]
        vars = [(e, f_ind(col_indi[e]), col_desc[e]) for e in arr_indi]
        disc_vars = [v for v in vars if v[2].is_discrete]
        if disc_vars:
            if sp.issparse(arr):
                # Sparse arrays are processed in one pass over all discrete
                # columns; `mask` marks which columns to include.
                max_vals = max(len(v[2].values) for v in disc_vars)
                disc_indi = {i for _, i, _ in disc_vars}
                mask = [i in disc_indi for i in range(arr.shape[1])]
                conts, nans = contingency(arr, row_data, max_vals - 1,
                                          n_rows - 1, W, mask)
                for col_i, arr_i, var in disc_vars:
                    n_vals = len(var.values)
                    contingencies[col_i] = (conts[arr_i][:, :n_vals],
                                            nans[arr_i])
            else:
                for col_i, arr_i, var in disc_vars:
                    contingencies[col_i] = contingency(
                        arr[:, arr_i].astype(float),
                        row_data, len(var.values) - 1, n_rows - 1, W)
        cont_vars = [v for v in vars if v[2].is_continuous]
        if cont_vars:
            classes = row_data.astype(dtype=np.int8)
            if W is not None:
                W = W.astype(dtype=np.float64)
            if sp.issparse(arr):
                arr = sp.csc_matrix(arr)
            for col_i, arr_i, _ in cont_vars:
                if sp.issparse(arr):
                    # Slice the CSC column without densifying.
                    col_data = arr.data[arr.indptr[arr_i]:
                                        arr.indptr[arr_i + 1]]
                    rows = arr.indices[arr.indptr[arr_i]:
                                       arr.indptr[arr_i + 1]]
                    W_ = None if W is None else W[rows]
                    classes_ = classes[rows]
                else:
                    col_data, W_, classes_ = arr[:, arr_i], W, classes
                col_data = col_data.astype(dtype=np.float64)
                U, C, unknown = _contingency.contingency_floatarray(
                    col_data, classes_, n_rows, W_)
                contingencies[col_i] = ([U, C], unknown)
    return contingencies, unknown_rows
@classmethod
def transpose(cls, table, feature_names_column="",
              meta_attr_name="Feature name"):
    """
    Transpose the table.
    :param table: Table - table to transpose
    :param feature_names_column: str - name of (String) meta attribute to
        use for feature names
    :param meta_attr_name: str - name of new meta attribute into which
        feature names are mapped
    :return: Table - transposed table
    """
    self = cls()
    n_cols, self.n_rows = table.X.shape
    # A previous transpose stores the original domain here, enabling a
    # round-trip (transpose of a transpose restores variable types).
    old_domain = table.attributes.get("old_domain")
    # attributes
    # - classes and metas to attributes of attributes
    # - arbitrary meta column to feature names
    self.X = table.X.T
    attributes = [ContinuousVariable(str(row[feature_names_column]))
                  for row in table] if feature_names_column else \
        [ContinuousVariable("Feature " + str(i + 1).zfill(
            int(np.ceil(np.log10(n_cols))))) for i in range(n_cols)]
    if old_domain and feature_names_column:
        # Restore each attribute's original type/values from the old domain.
        for i in range(len(attributes)):
            if attributes[i].name in old_domain:
                var = old_domain[attributes[i].name]
                attr = ContinuousVariable(var.name) if var.is_continuous \
                    else DiscreteVariable(var.name, var.values)
                attr.attributes = var.attributes.copy()
                attributes[i] = attr

    def set_attributes_of_attributes(_vars, _table):
        # Copy each class/meta column of the source table into the
        # `attributes` dict of the corresponding new attribute.
        for i, variable in enumerate(_vars):
            if variable.name == feature_names_column:
                continue
            for j, row in enumerate(_table):
                value = variable.repr_val(row) if np.isscalar(row) \
                    else row[i] if isinstance(row[i], str) \
                    else variable.repr_val(row[i])
                if value not in MISSING_VALUES:
                    attributes[j].attributes[variable.name] = value

    set_attributes_of_attributes(table.domain.class_vars, table.Y)
    set_attributes_of_attributes(table.domain.metas, table.metas)
    # weights
    self.W = np.empty((self.n_rows, 0))

    def get_table_from_attributes_of_attributes(_vars, _dtype=float):
        # Inverse of set_attributes_of_attributes: rebuild a column array
        # from the per-attribute `attributes` dicts of the source table.
        T = np.empty((self.n_rows, len(_vars)), dtype=_dtype)
        for i, _attr in enumerate(table.domain.attributes):
            for j, _var in enumerate(_vars):
                val = str(_attr.attributes.get(_var.name, ""))
                if not _var.is_string:
                    val = np.nan if val in MISSING_VALUES else \
                        _var.values.index(val) if \
                        _var.is_discrete else float(val)
                T[i, j] = val
        return T

    # class_vars - attributes of attributes to class - from old domain
    class_vars = []
    if old_domain:
        class_vars = old_domain.class_vars
    self.Y = get_table_from_attributes_of_attributes(class_vars)
    # metas
    # - feature names and attributes of attributes to metas
    self.metas, metas = np.empty((self.n_rows, 0), dtype=object), []
    if meta_attr_name not in [m.name for m in table.domain.metas] and \
            table.domain.attributes:
        self.metas = np.array([[a.name] for a in table.domain.attributes],
                              dtype=object)
        metas.append(StringVariable(meta_attr_name))
    names = chain.from_iterable(list(attr.attributes)
                                for attr in table.domain.attributes)
    names = sorted(set(names) - {var.name for var in class_vars})

    def guessed_var(i, var_name):
        # Infer a proper variable type for column i of M (closure over the
        # M and _metas bindings established just below).
        orig_vals = M[:, i]
        val_map, vals, var_type = Orange.data.io.guess_data_type(orig_vals)
        values, variable = Orange.data.io.sanitize_variable(
            val_map, vals, orig_vals, var_type,
            {}, _metas, None, var_name)
        M[:, i] = values
        return variable

    _metas = [StringVariable(n) for n in names]
    if old_domain:
        _metas = [m for m in old_domain.metas if m.name != meta_attr_name]
    M = get_table_from_attributes_of_attributes(_metas, _dtype=object)
    if not old_domain:
        _metas = [guessed_var(i, m.name) for i, m in enumerate(_metas)]
    if _metas:
        self.metas = np.hstack((self.metas, M))
        metas.extend(_metas)
    self.domain = Domain(attributes, class_vars, metas)
    cls._init_ids(self)
    self.attributes = table.attributes.copy()
    self.attributes["old_domain"] = table.domain
    return self
def _check_arrays(*arrays, dtype=None):
checked = []
if not len(arrays):
return checked
def ninstances(array):
if hasattr(array, "shape"):
return array.shape[0]
else:
return len(array) if array is not None else 0
shape_1 = ninstances(arrays[0])
for array in arrays:
if array is None:
checked.append(array)
continue
if ninstances(array) != shape_1:
raise ValueError("Leading dimension mismatch (%d != %d)"
% (len(array), shape_1))
if sp.issparse(array):
array.data = np.asarray(array.data)
has_inf = _check_inf(array.data)
else:
if dtype is not None:
array = np.asarray(array, dtype=dtype)
else:
array = np.asarray(array)
has_inf = _check_inf(array)
if has_inf:
raise ValueError("Array contains infinity.")
checked.append(array)
return checked
def _check_inf(array):
return array.dtype.char in np.typecodes['AllFloat'] and \
np.isinf(array.data).any()
def _subarray(arr, rows, cols):
    """Return the `rows` x `cols` cross-product submatrix of `arr`."""
    index = _rxc_ix(rows, cols)
    return arr[index]
def _rxc_ix(rows, cols):
"""
Construct an index object to index the `rows` x `cols` cross product.
Rows and columns can be a 1d bool or int sequence, a slice or an
Ellipsis (`...`). The later is a convenience and is interpreted the same
as `slice(None, None, -1)`
Parameters
----------
rows : 1D sequence, slice or Ellipsis
Row indices.
cols : 1D sequence, slice or Ellipsis
Column indices.
See Also
--------
numpy.ix_
Examples
--------
>>> import numpy as np
>>> a = np.arange(10).reshape(2, 5)
>>> a[_rxc_ix([0, 1], [3, 4])]
array([[3, 4],
[8, 9]])
>>> a[_rxc_ix([False, True], ...)]
array([[5, 6, 7, 8, 9]])
"""
rows = slice(None, None, 1) if rows is ... else rows
cols = slice(None, None, 1) if cols is ... else cols
isslice = (isinstance(rows, slice), isinstance(cols, slice))
if isslice == (True, True):
return rows, cols
elif isslice == (True, False):
return rows, np.asarray(np.ix_(cols), int).ravel()
elif isslice == (False, True):
return np.asarray(np.ix_(rows), int).ravel(), cols
else:
r, c = np.ix_(rows, cols)
return np.asarray(r, int), np.asarray(c, int)
| {
"content_hash": "ce34f793edf99d8cfe51475be4a08088",
"timestamp": "",
"source": "github",
"line_count": 1644,
"max_line_length": 105,
"avg_line_length": 38.681265206812654,
"alnum_prop": 0.5088690401308341,
"repo_name": "cheral/orange3",
"id": "eecbda0cb96a866cacd1c641135ce47e514524ad",
"size": "63592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Orange/data/table.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12023"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20217"
},
{
"name": "Python",
"bytes": "4139574"
},
{
"name": "Shell",
"bytes": "47441"
}
],
"symlink_target": ""
} |
import factory
import factory.django
from .models import Post
from django.utils.timezone import now
from django.utils.text import slugify
from faker.providers.lorem.la import Provider as LoremProvider
class ExtendedLoremProvider(LoremProvider):
    """Lorem provider extended with a `title` formatter."""

    @classmethod
    def title(cls, nb_words=6, variable_nb_words=True):
        # A title is a generated sentence with its trailing period removed.
        sentence = cls.sentence(nb_words, variable_nb_words)
        return sentence[:-1]
# Register the extended provider so that factory.Faker('title') resolves.
factory.Faker.add_provider(ExtendedLoremProvider)
class PostFactory(factory.django.DjangoModelFactory):
    """Factory producing Post model instances with fake lorem content."""

    class Meta:
        model = Post

    is_published = False
    # 'title' is the custom formatter registered via ExtendedLoremProvider.
    title = factory.Faker('title')
    # Slug is derived from the generated title.
    slug = factory.LazyAttribute(lambda p: slugify(p.title))
    excerpt = factory.Faker('sentence', nb_words=20)
    body = factory.Faker('text', max_nb_chars=5000)

    @factory.lazy_attribute
    def published_at(self):
        # Published posts get a timestamp; drafts have none.
        return now() if self.is_published else None
| {
"content_hash": "f3d8ad177b7564bf9e6ae00a737d3af0",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 62,
"avg_line_length": 27.03125,
"alnum_prop": 0.7294797687861272,
"repo_name": "Prometeo/blog",
"id": "ac6112b71c0145a89dcfd312cae9e74d7564cf4b",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "posts/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "834"
},
{
"name": "Python",
"bytes": "13463"
}
],
"symlink_target": ""
} |
from base64 import b64encode
from ipykernel.comm import Comm
from IPython import get_ipython
import StringIO
import urllib
# Module-level Comm channel to the frontend; created by connect().
_comm=None
def is_notebook():
    """Return True when this session runs inside an IPython notebook kernel."""
    shell = get_ipython()
    if shell is None:
        return False
    if not shell.config:
        return False
    return 'IPKernelApp' in shell.config
def connect():
    """
    establish connection to frontend notebook
    """
    if not is_notebook():
        print('Python session is not running in a Notebook Kernel')
        return
    global _comm
    kernel=get_ipython().kernel
    # Let the frontend open comms targeted at 'tdb'.
    kernel.comm_manager.register_target('tdb',handle_comm_opened)
    # initiate connection to frontend.
    _comm=Comm(target_name='tdb',data={})
    # bind recv handler
    # NOTE(review): on_msg(None) registers None as the message handler,
    # i.e. it clears rather than binds a callback -- confirm intentional.
    _comm.on_msg(None)
def send_action(action, params=None):
    """
    helper method for sending actions
    """
    message = {"msg_type": "action", "action": action}
    if params is not None:
        message["params"] = params
    _comm.send(message)
def send_fig(fig,name):
    """
    sends figure to frontend
    """
    # NOTE: Python 2 only -- relies on the StringIO module, urllib.quote and
    # the StringIO .buf attribute; Python 3 would need io.BytesIO and
    # urllib.parse.quote.
    imgdata = StringIO.StringIO()
    fig.savefig(imgdata, format='png')
    imgdata.seek(0)  # rewind the data
    # Embed the PNG as a percent-encoded base64 data URI.
    uri = 'data:image/png;base64,' + urllib.quote(b64encode(imgdata.buf))
    send_action("update_plot",params={"src":uri, "name":name})
# handler messages
def handle_comm_opened(msg):
    """Callback for comm_manager.register_target.

    The printed output goes to the kernel console, not the notebook.
    """
    for item in ('comm opened', msg):
        print(item)
"content_hash": "0f130ccaa077162830e576c4f2d645c0",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 70,
"avg_line_length": 23.163636363636364,
"alnum_prop": 0.7213500784929356,
"repo_name": "ericjang/tdb",
"id": "012f400781b02f5a5879c4550001b176e35b1b71",
"size": "1274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tdb/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "503404"
},
{
"name": "Jupyter Notebook",
"bytes": "17035"
},
{
"name": "Python",
"bytes": "37177"
},
{
"name": "Shell",
"bytes": "54"
}
],
"symlink_target": ""
} |
import threading
import logging
import asyncore
# Serializes access to asyncore.socket_map between close_sockets() and
# num_sockets().
close_sockets_lock = threading.Lock()
def _do_close_sockets():
for value in asyncore.socket_map.values():
logging.info("Close Socket: %s" % str(value))
value.socket.close()
logging.info("Clean socket map!")
asyncore.socket_map = {}
def close_sockets():
    """Close all registered asyncore sockets, serialized by the module lock.

    Uses the lock as a context manager: the previous explicit acquire()
    *inside* the try block meant a failed acquire would still run
    release() on an unacquired lock (RuntimeError). The unnecessary
    `global` declaration (the lock is only read) is also dropped.
    """
    with close_sockets_lock:
        _do_close_sockets()
def num_sockets():
    """Return the number of currently registered asyncore sockets.

    Thread-safe with respect to close_sockets(); uses the lock as a
    context manager instead of acquire() inside try (which would release
    an unacquired lock on a failed acquire). len() is taken on the map
    directly -- materializing .values() was redundant.
    """
    with close_sockets_lock:
        return len(asyncore.socket_map)
| {
"content_hash": "a9f36a92a283636d719bd0b7119aad31",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 53,
"avg_line_length": 21.40625,
"alnum_prop": 0.6423357664233577,
"repo_name": "blacktear23/py-servicebus",
"id": "705b131a58bc14943b550df9a1af9e11f2b6dd19",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servicebus/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "123"
},
{
"name": "Python",
"bytes": "461610"
}
],
"symlink_target": ""
} |
from __future__ import division
import os
import sys
import time
import csv
import shutil
import threading
import errno
import tempfile
import collections
import re
from distutils.version import LooseVersion
try:
import pandas as pd
except ImportError:
pd = None
from wlauto import Instrument, Parameter, IterationResult
from wlauto.instrumentation import instrument_is_installed
from wlauto.exceptions import (InstrumentError, WorkerThreadError, ConfigError,
DeviceNotRespondingError, TimeoutError)
from wlauto.utils.types import boolean, numeric
from wlauto.utils.fps import (FpsProcessor, SurfaceFlingerFrame, GfxInfoFrame, GFXINFO_EXEMPT,
VSYNC_INTERVAL)
# NOTE(review): neither constant is referenced in this module's visible
# code -- confirm external use before removing.
PAUSE_LATENCY = 20
EPSYLON = 0.0001
class FpsInstrument(Instrument):

    name = 'fps'
    description = """
    Measures Frames Per Second (FPS) and associated metrics for a workload.

    .. note:: This instrument depends on pandas Python library (which is not part of standard
              WA dependencies), so you will need to install that first, before you can use it.

    Android L and below use SurfaceFlinger to calculate the FPS data.
    Android M and above use gfxinfo to calculate the FPS data.

    SurfaceFlinger:
    The view is specified by the workload as ``view`` attribute. This defaults
    to ``'SurfaceView'`` for game workloads, and ``None`` for non-game
    workloads (as for them FPS mesurement usually doesn't make sense).
    Individual workloads may override this.

    gfxinfo:
    The view is specified by the workload as ``package`` attribute.
    This is because gfxinfo already processes for all views in a package.

    This instrument adds four metrics to the results:

        :FPS: Frames Per Second. This is the frame rate of the workload.
        :frame_count: The total number of frames rendered during the execution of
                      the workload.
        :janks: The number of "janks" that occured during execution of the
                workload. Janks are sudden shifts in frame rate. They result
                in a "stuttery" UI. See http://jankfree.org/jank-busters-io
        :not_at_vsync: The number of frames that did not render in a single
                       vsync cycle.

    """
    supported_platforms = ['android']

    parameters = [
        Parameter('drop_threshold', kind=numeric, default=5,
                  description='Data points below this FPS will be dropped as they '
                              'do not constitute "real" gameplay. The assumption '
                              'being that while actually running, the FPS in the '
                              'game will not drop below X frames per second, '
                              'except on loading screens, menus, etc, which '
                              'should not contribute to FPS calculation. '),
        Parameter('keep_raw', kind=boolean, default=False,
                  description='If set to ``True``, this will keep the raw dumpsys output '
                              'in the results directory (this is maily used for debugging) '
                              'Note: frames.csv with collected frames data will always be '
                              'generated regardless of this setting.'),
        Parameter('generate_csv', kind=boolean, default=True,
                  description='If set to ``True``, this will produce temporal fps data '
                              'in the results directory, in a file named fps.csv '
                              'Note: fps data will appear as discrete step-like values '
                              'in order to produce a more meainingfull representation,'
                              'a rolling mean can be applied.'),
        Parameter('crash_check', kind=boolean, default=True,
                  description="""
                  Specifies wither the instrument should check for crashed content by examining
                  frame data. If this is set, ``execution_time`` instrument must also be installed.
                  The check is performed by using the measured FPS and exection time to estimate the expected
                  frames cound and comparing that against the measured frames count. The the ratio of
                  measured/expected is too low, then it is assumed that the content has crashed part way
                  during the run. What is "too low" is determined by ``crash_threshold``.

                  .. note:: This is not 100\% fool-proof. If the crash occurs sufficiently close to
                            workload's termination, it may not be detected. If this is expected, the
                            threshold may be adjusted up to compensate.
                  """),
        Parameter('crash_threshold', kind=float, default=0.7,
                  description="""
                  Specifies the threshold used to decided whether a measured/expected frames ration indicates
                  a content crash. E.g. a value of ``0.75`` means the number of actual frames counted is a
                  quarter lower than expected, it will treated as a content crash.
                  """),
        Parameter('dumpsys_period', kind=float, default=2, constraint=lambda x: x > 0,
                  description="""
                  Specifies the time period between calls to ``dumpsys SurfaceFlinger --latency`` in
                  seconds when collecting frame data. Using a lower value improves the granularity
                  of timings when recording actions that take a short time to complete. Note, this
                  will produce duplicate frame data in the raw dumpsys output, however, this is
                  filtered out in frames.csv. It may also affect the overall load on the system.

                  The default value of 2 seconds corresponds with the NUM_FRAME_RECORDS in
                  android/services/surfaceflinger/FrameTracker.h (as of the time of writing
                  currently 128) and a frame rate of 60 fps that is applicable to most devices.
                  """),
        Parameter('force_surfaceflinger', kind=boolean, default=False,
                  description="""
                  By default, the method to capture fps data is based on Android version.
                  If this is set to true, force the instrument to use the SurfaceFlinger method
                  regardless of its Android version.
                  """),
    ]

    def __init__(self, device, **kwargs):
        super(FpsInstrument, self).__init__(device, **kwargs)
        self.collector = None      # LatencyCollector thread, created in setup()
        self.outfile = None        # path of frames.csv in the output directory
        self.fps_outfile = None    # path of fps.csv in the output directory
        self.is_enabled = True
        self.fps_method = ''       # 'surfaceflinger' or 'gfxinfo'

    def validate(self):
        # Fail early when the optional pandas dependency or a required
        # companion instrument is missing.
        if not pd or LooseVersion(pd.__version__) < LooseVersion('0.13.1'):
            message = ('fps instrument requires pandas Python package (version 0.13.1 or higher) to be installed.\n'
                       'You can install it with pip, e.g. "sudo pip install pandas"')
            raise InstrumentError(message)
        if self.crash_check and not instrument_is_installed('execution_time'):
            raise ConfigError('execution_time instrument must be installed in order to check for content crash.')

    def setup(self, context):
        # Decide between the gfxinfo (Android M+, SDK >= 23) and
        # SurfaceFlinger collection methods, then build the collector.
        workload = context.workload
        use_gfxinfo = not self.force_surfaceflinger and (self.device.get_sdk_version() >= 23)
        if use_gfxinfo and not hasattr(workload, 'package'):
            self.logger.debug('Workload does not contain a package; falling back to SurfaceFlinger...')
            use_gfxinfo = False
        if not use_gfxinfo and not hasattr(workload, 'view'):
            self.logger.debug('Workload does not contain a view; disabling...')
            self.is_enabled = False
            return
        self.fps_outfile = os.path.join(context.output_directory, 'fps.csv')
        self.outfile = os.path.join(context.output_directory, 'frames.csv')
        # Android M brings a new method of collecting FPS data
        if use_gfxinfo:
            # gfxinfo takes in the package name rather than a single view/activity
            # so there is no 'list_command' to run and compare against a list of
            # views/activities. Additionally, clearing the stats requires the package
            # so we need to clear for every package in the workload.
            # Usually there is only one package, but some workloads may run multiple
            # packages so each one must be reset before continuing
            self.fps_method = 'gfxinfo'
            runcmd = 'dumpsys gfxinfo {} framestats'
            lstcmd = None
            params = workload.package
            params = [params] if isinstance(params, basestring) else params
            for pkg in params:
                self.device.execute('dumpsys gfxinfo {} reset'.format(pkg))
        else:
            self.fps_method = 'surfaceflinger'
            runcmd = 'dumpsys SurfaceFlinger --latency {}'
            lstcmd = 'dumpsys SurfaceFlinger --list'
            params = workload.view
            self.device.execute('dumpsys SurfaceFlinger --latency-clear ')
        self.collector = LatencyCollector(self.outfile, self.device, params or '',
                                          self.keep_raw, self.logger, self.dumpsys_period,
                                          runcmd, lstcmd, self.fps_method)

    def start(self, context):
        if self.is_enabled:
            self.logger.debug('Starting Frame Statistics collection...')
            self.collector.start()

    def stop(self, context):
        if self.is_enabled and self.collector.is_alive():
            self.logger.debug('Stopping Frame Statistics collection...')
            self.collector.stop()

    def update_result(self, context):
        # Process the collected frames.csv into FPS metrics; NaN/zero
        # defaults are reported when no frame data was captured.
        if self.is_enabled:
            fps, frame_count, janks, not_at_vsync = float('nan'), 0, 0, 0
            p90, p95, p99 = [float('nan')] * 3
            data = pd.read_csv(self.outfile)
            if not data.empty:  # pylint: disable=maybe-no-member
                # gfxinfo method has an additional file generated that contains statistics
                stats_file = None
                if self.fps_method == 'gfxinfo':
                    stats_file = os.path.join(os.path.dirname(self.outfile), 'gfxinfo.csv')
                fp = FpsProcessor(data, extra_data=stats_file)
                per_frame_fps, metrics = fp.process(self.collector.refresh_period, self.drop_threshold)
                fps, frame_count, janks, not_at_vsync = metrics
                if self.generate_csv:
                    per_frame_fps.to_csv(self.fps_outfile, index=False, header=True)
                    context.add_artifact('fps', path='fps.csv', kind='data')
                p90, p95, p99 = fp.percentiles()
            context.result.add_metric('FPS', fps)
            context.result.add_metric('frame_count', frame_count)
            context.result.add_metric('janks', janks, lower_is_better=True)
            context.result.add_metric('not_at_vsync', not_at_vsync, lower_is_better=True)
            context.result.add_metric('frame_time_90percentile', p90, 'ms', lower_is_better=True)
            context.result.add_metric('frame_time_95percentile', p95, 'ms', lower_is_better=True)
            context.result.add_metric('frame_time_99percentile', p99, 'ms', lower_is_better=True)

    def slow_update_result(self, context):
        # Crash check: compare the measured frame count against the count
        # expected from FPS x execution time; flag the iteration as failed
        # when the ratio falls below crash_threshold.
        result = context.result
        if self.crash_check and result.has_metric('execution_time'):
            self.logger.debug('Checking for crashed content.')
            exec_time = result['execution_time'].value
            fps = result['FPS'].value
            frames = result['frame_count'].value
            if all([exec_time, fps, frames]):
                expected_frames = fps * exec_time
                ratio = frames / expected_frames
                self.logger.debug('actual/expected frames: {:.2}'.format(ratio))
                if ratio < self.crash_threshold:
                    self.logger.error('Content for {} appears to have crashed.'.format(context.spec.label))
                    result.status = IterationResult.FAILED
                    result.add_event('Content crash detected (actual/expected frames: {:.2}).'.format(ratio))
class LatencyCollector(threading.Thread):
    # Background thread that periodically dumps frame timing data from the
    # device and parses it into per-frame records plus (for gfxinfo) a
    # summary statistics file.
    #
    # Note: the size of the frames buffer for a particular surface is defined
    # by NUM_FRAME_RECORDS inside android/services/surfaceflinger/FrameTracker.h.
    # At the time of writing, this was hard-coded to 128. So at 60 fps
    # (and there is no reason to go above that, as it matches vsync rate
    # on pretty much all phones), there is just over 2 seconds' worth of
    # frames in there. Hence the default sleep time of 2 seconds between dumps.

    def __init__(self, outfile, device, activities, keep_raw, logger, dumpsys_period,
                 run_command, list_command, fps_method):
        super(LatencyCollector, self).__init__()
        self.outfile = outfile
        self.device = device
        self.keep_raw = keep_raw
        self.logger = logger
        self.dumpsys_period = dumpsys_period
        self.stop_signal = threading.Event()
        self.frames = []              # parsed frame records (namedtuples)
        self.last_ready_time = 0      # used to drop duplicate SF frames
        self.refresh_period = VSYNC_INTERVAL
        self.drop_threshold = self.refresh_period * 1000
        self.exc = None               # exception captured on the worker thread
        self.unresponsive_count = 0
        if isinstance(activities, basestring):
            activities = [activities]
        self.activities = activities
        self.command_template = run_command
        self.list_command = list_command
        self.fps_method = fps_method
        # Based on the fps_method, setup the header for the csv,
        # and set the process_trace_line function accordingly
        if fps_method == 'surfaceflinger':
            self.header = SurfaceFlingerFrame._fields
            self.process_trace_line = self._process_surfaceflinger_line
        else:
            self.header = GfxInfoFrame._fields
            self.process_trace_line = self._process_gfxinfo_line
            self.re_frame = re.compile('([0-9]+,)+')
            self.re_stats = re.compile('.*(percentile|frames|Number).*')
            # Create a template summary text block that matches what gfxinfo gives after a reset
            # - 133 is the default ms value for percentiles after reset
            self.summary = collections.OrderedDict((('Total frames rendered', 0),
                                                    ('Janky frames', 0),
                                                    ('90th percentile', 133),
                                                    ('95th percentile', 133),
                                                    ('99th percentile', 133),
                                                    ('Number Missed Vsync', 0),
                                                    ('Number High input latency', 0),
                                                    ('Number Slow UI thread', 0),
                                                    ('Number Slow bitmap uploads', 0),
                                                    ('Number Slow issue draw commands', 0)))

    def run(self):
        # Collection loop: append raw dumpsys output to a temp file until
        # stop() is signalled, then parse it line by line and write
        # frames.csv (and gfxinfo.csv for the gfxinfo method).
        try:
            self.logger.debug('Frame Statistics collection started. Method: ' + self.fps_method)
            self.stop_signal.clear()
            fd, temp_file = tempfile.mkstemp()
            self.logger.debug('temp file: {}'.format(temp_file))
            wfh = os.fdopen(fd, 'wb')
            try:
                view_list = self.activities
                while not self.stop_signal.is_set():
                    # If a list_command is provided, set the view_list to be its output
                    # Then check for each activity in this list and if there is a match,
                    # process the output. If no command is provided, then always process.
                    if self.list_command:
                        view_list = self.device.execute(self.list_command).split()
                    for activity in self.activities:
                        if activity in view_list:
                            wfh.write(self.device.execute(self.command_template.format(activity)))
                    time.sleep(self.dumpsys_period)
            finally:
                wfh.close()
            # TODO: this can happen after the run during results processing
            with open(temp_file) as fh:
                text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
                for line in text.split('\n'):
                    line = line.strip()
                    if line:
                        self.process_trace_line(line)
            if self.keep_raw:
                raw_file = os.path.join(os.path.dirname(self.outfile), self.fps_method + '.raw')
                shutil.copy(temp_file, raw_file)
            os.unlink(temp_file)
        except (DeviceNotRespondingError, TimeoutError):  # pylint: disable=W0703
            raise
        except Exception, e:  # pylint: disable=W0703
            # Captured for re-raise on the caller's thread in stop().
            self.logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
            self.exc = WorkerThreadError(self.name, sys.exc_info())
        self.logger.debug('Frame Statistics collection stopped.')
        with open(self.outfile, 'w') as wfh:
            writer = csv.writer(wfh)
            writer.writerow(self.header)
            writer.writerows(self.frames)
        self.logger.debug('Frames data written.')
        # gfxinfo outputs its own summary statistics for the run.
        # No point calculating those from the raw data, so store in its own file for later use.
        if self.fps_method == 'gfxinfo':
            stats_file = os.path.join(os.path.dirname(self.outfile), 'gfxinfo.csv')
            with open(stats_file, 'w') as wfh:
                writer = csv.writer(wfh)
                writer.writerows(zip(self.summary.keys(), self.summary.values()))
            self.logger.debug('Gfxinfo summary data written.')

    def stop(self):
        # Signal the collection loop to finish, wait for the thread, and
        # re-raise any exception captured on the worker thread.
        self.stop_signal.set()
        self.join()
        if self.unresponsive_count:
            message = 'LatencyCollector was unrepsonsive {} times.'.format(self.unresponsive_count)
            if self.unresponsive_count > 10:
                self.logger.warning(message)
            else:
                self.logger.debug(message)
        if self.exc:
            raise self.exc  # pylint: disable=E0702
        self.logger.debug('Frame Statistics complete.')

    def _process_surfaceflinger_line(self, line):
        # Parse one line of `dumpsys SurfaceFlinger --latency` output:
        # 3 fields = a frame record, 1 field = the refresh period.
        parts = line.split()
        if len(parts) == 3:
            frame = SurfaceFlingerFrame(*map(int, parts))
            if frame.frame_ready_time <= self.last_ready_time:
                return  # duplicate frame
            if (frame.frame_ready_time - frame.desired_present_time) > self.drop_threshold:
                self.logger.debug('Dropping bogus frame {}.'.format(line))
                return  # bogus data
            self.last_ready_time = frame.frame_ready_time
            self.frames.append(frame)
        elif len(parts) == 1:
            self.refresh_period = int(parts[0])
            self.drop_threshold = self.refresh_period * 1000
        elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
            self.unresponsive_count += 1
        else:
            self.logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))

    def _process_gfxinfo_line(self, line):
        # Parse one line of `dumpsys gfxinfo ... framestats` output: either
        # a comma-separated frame record or a summary statistics line.
        if 'No process found for' in line:
            self.unresponsive_count += 1
            return
        # Process lines related to the frame data
        match = self.re_frame.match(line)
        if match:
            data = match.group(0)[:-1]
            data = map(int, data.split(','))
            # Ignore additional fields
            data = data[:len(self.header)]
            frame = GfxInfoFrame(*data)
            if frame not in self.frames:
                if frame.Flags & GFXINFO_EXEMPT:
                    self.logger.debug('Dropping exempt frame {}.'.format(line))
                else:
                    self.frames.append(frame)
            return
        # Process lines related to the summary statistics
        match = self.re_stats.match(line)
        if match:
            data = match.group(0)
            title, value = data.split(':', 1)
            title = title.strip()
            value = value.strip()
            if title in self.summary:
                # Strip units/percentages, e.g. "13ms" or "50 (2.50%)".
                if 'ms' in value:
                    value = value.strip('ms')
                if '%' in value:
                    value = value.split()[0]
                self.summary[title] = int(value)
| {
"content_hash": "426c73b87fa796a0a549bdc4cf5f7f43",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 116,
"avg_line_length": 51.14567901234568,
"alnum_prop": 0.5829873515496765,
"repo_name": "bjackman/workload-automation",
"id": "ad630bdb87767c76cfed49525eb8c2dc90bee7c3",
"size": "21331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wlauto/instrumentation/fps/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40003"
},
{
"name": "HTML",
"bytes": "243720"
},
{
"name": "Java",
"bytes": "226912"
},
{
"name": "JavaScript",
"bytes": "6578"
},
{
"name": "Jupyter Notebook",
"bytes": "1322"
},
{
"name": "Makefile",
"bytes": "430"
},
{
"name": "Python",
"bytes": "1555462"
},
{
"name": "Shell",
"bytes": "39222"
},
{
"name": "Vim script",
"bytes": "901"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import io
import json
import os
import re
import sys
import app.default_prefs
import app.log
import app.regex
class Prefs:
    """Aggregates ci_edit preferences: colors, editor, grammars and file types.

    Defaults come from app.default_prefs and are overlaid with any JSON
    files found under |prefsDirectory| (see load_prefs()).
    """

    def __init__(self):
        # Per-user override directory; each category is stored as a
        # "<category>.json" file inside it.
        self.prefsDirectory = "~/.ci_edit/prefs/"
        prefs = app.default_prefs.prefs
        self.color8 = app.default_prefs.color8
        self.color16 = app.default_prefs.color16
        self.color256 = app.default_prefs.color256
        # Active color scheme; may be replaced in init() when the editor
        # prefs select the "custom" scheme.
        self.color = self.color256
        self.dictionaries = prefs.get("dictionaries", [])
        self.editor = prefs.get("editor", {})
        self.devTest = prefs.get("devTest", {})
        self.palette = prefs.get("palette", {})
        self.startup = {}
        self.status = prefs.get(u"status", {})
        self.userData = prefs.get(u"userData", {})
        self.__set_up_grammars(prefs.get(u"grammar", {}))
        self.__set_up_file_types(prefs.get(u"fileType", {}))
        self.init()

    def load_prefs(self, fileName, category):
        """Overlay |category| (a dict) with user prefs from "<fileName>.json".

        Returns |category|, updated in place when the file exists and parses;
        a malformed file is logged and leaves the defaults intact.
        """
        # Check the user home directory for preferences.
        prefsPath = os.path.expanduser(
            os.path.expandvars(
                os.path.join(self.prefsDirectory, "%s.json" % (fileName,))
            )
        )
        if os.path.isfile(prefsPath) and os.access(prefsPath, os.R_OK):
            with io.open(prefsPath, "r") as f:
                try:
                    additionalPrefs = json.loads(f.read())
                    app.log.startup(additionalPrefs)
                    category.update(additionalPrefs)
                    app.log.startup("Updated editor prefs from", prefsPath)
                    app.log.startup("as", category)
                except Exception as e:
                    # Best effort: keep the defaults when parsing fails.
                    app.log.startup("failed to parse", prefsPath)
                    app.log.startup("error", e)
        return category

    def init(self):
        """Apply user overrides and derive per-grammar color indices."""
        self.editor = self.load_prefs("editor", self.editor)
        self.status = self.load_prefs("status", self.status)
        self.colorSchemeName = self.editor["colorScheme"]
        if self.colorSchemeName == "custom":
            # Check the user home directory for a color scheme preference. If
            # found load it to replace the default color scheme.
            self.color = self.load_prefs("color_scheme", self.color)
        defaultColor = self.color["default"]
        defaultKeywordsColor = self.color["keyword"]
        defaultSpecialsColor = self.color["special"]
        for k, v in self.grammars.items():
            # Colors.
            v["colorIndex"] = self.color.get(k, defaultColor)
            if 0:  # Disabled: per-grammar curses color pairs.
                v["keywordsColor"] = curses.color_pair(
                    self.color.get(k + "_keyword_color", defaultKeywordsColor)
                )
                v["specialsColor"] = curses.color_pair(
                    self.color.get(k + "_special_color", defaultSpecialsColor)
                )
        app.log.info("prefs init")

    def category(self, name):
        """Map a category name to its mutable preference dict."""
        return {
            "color": self.color,
            "editor": self.editor,
            "startup": self.startup,
        }[name]

    def get_file_type(self, filePath):
        """Return the file type for |filePath| based on its name/extension.

        NOTE(review): when |filePath| is None this returns the "text"
        *grammar dict*, while every other path returns a type-name string.
        Callers appear to rely on the asymmetry; confirm before changing.
        """
        if filePath is None:
            return self.grammars.get("text")
        name = os.path.split(filePath)[1]
        fileType = self.nameToType.get(name)
        if fileType is None:
            fileExtension = os.path.splitext(name)[1]
            fileType = self.extensions.get(fileExtension, "text")
        return fileType

    def tabs_to_spaces(self, fileType):
        """Whether files of |fileType| should convert tabs to spaces.

        Falsy (False or None) when the type is unknown or unset.
        """
        prefs = app.default_prefs.prefs.get(u"fileType", {})
        if fileType is None or prefs is None:
            return False
        file_prefs = prefs.get(fileType)
        return file_prefs and file_prefs.get(u"tabToSpaces")

    def get_grammar(self, fileType):
        """Return the grammar dict registered for |fileType| (or None)."""
        return self.grammars.get(fileType)

    def save(self, category, label, value):
        """Set |label| = |value| in |category| and persist the category."""
        app.log.info(category, label, value)
        prefCategory = self.category(category)
        prefCategory[label] = value
        prefsPath = os.path.expanduser(
            os.path.expandvars(
                os.path.join(self.prefsDirectory, "%s.json" % (category,))
            )
        )
        with io.open(prefsPath, "w", encoding=u"utf-8") as f:
            try:
                # Fix: the original wrote json.dumps(prefs[category]) where
                # |prefs| is undefined in this scope (NameError, silently
                # swallowed below). The intended data is the category dict
                # that was just updated.
                f.write(json.dumps(prefCategory))
            except Exception as e:
                app.log.error("error writing prefs")
                app.log.exception(e)

    def _raise_grammar_not_found(self, grammarName=u"<unknown>"):
        """Log the known grammars and raise for a missing |grammarName|.

        Fix: the original referenced an undefined global ``grammarName``,
        which raised NameError instead of the intended message. The new
        parameter defaults so existing zero-argument callers still work.
        """
        app.log.startup("Available grammars:")
        for k, v in self.grammars.items():
            app.log.startup("  ", k, ":", len(v))
        raise Exception('missing grammar for "' + grammarName + '" in prefs.py')

    def __set_up_grammars(self, defaultGrammars):
        """Index grammars by name and compile each grammar's match regexes."""
        self.grammars = {}
        # Arrange all the grammars by name.
        for k, v in defaultGrammars.items():
            v["name"] = k
            self.grammars[k] = v
        # Compile regexes for each grammar.
        for k, v in defaultGrammars.items():
            if 0:  # Disabled: separate keyword/error/special regexes.
                # keywords re.
                v["keywordsRe"] = re.compile(
                    app.regex.join_re_word_list(
                        v.get("keywords", []) + v.get("types", [])
                    )
                )
                v["errorsRe"] = re.compile(app.regex.join_re_list(v.get("errors", [])))
                v["specialsRe"] = re.compile(
                    app.regex.join_re_list(v.get("special", []))
                )
            # contains and end re.
            matchGrammars = []
            markers = []
            # Index [0]
            if v.get("escaped"):
                markers.append(v["escaped"])
                matchGrammars.append(v)
            else:
                # Add a non-matchable placeholder.
                markers.append(app.regex.kNonMatchingRegex)
                matchGrammars.append(None)
            # Index [1]
            if v.get("end"):
                markers.append(v["end"])
                matchGrammars.append(v)
            else:
                # Add a non-matchable placeholder.
                markers.append(app.regex.kNonMatchingRegex)
                matchGrammars.append(None)
            # |Contains| markers start at index 2.
            for grammarName in v.get("contains", []):
                g = self.grammars.get(grammarName, None)
                if g is None:
                    # Pass the missing name so the raised error names it.
                    self._raise_grammar_not_found(grammarName)
                markers.append(g.get("begin", g.get("matches", u"")))
                matchGrammars.append(g)
            # |Next| markers start after |contains|.
            for grammarName in v.get("next", []):
                g = self.grammars.get(grammarName, None)
                if g is None:
                    self._raise_grammar_not_found(grammarName)
                markers.append(g["begin"])
                matchGrammars.append(g)
            # |Errors| markers start after |next| markers.
            markers += v.get("errors", [])
            # |Keywords| markers start after |errors| markers.
            for keyword in v.get("keywords", []):
                markers.append(r"\b" + keyword + r"\b")
            # |Types| markers start after |keywords| markers.
            for types in v.get("types", []):
                markers.append(r"\b" + types + r"\b")
            # |Special| markers start after |types| markers.
            markers += v.get("special", [])
            # Variable width characters are at index [-3] in markers.
            markers.append(r"\t+")
            # Potentially double wide characters are at index [-2] in markers.
            markers.append(u"[\U00001100-\U000fffff]+")
            # Carriage return characters are at index [-1] in markers.
            markers.append(r"\n")
            # app.log.startup('markers', v['name'], markers)
            v["matchRe"] = re.compile(app.regex.join_re_list(markers))
            v["markers"] = markers
            v["matchGrammars"] = matchGrammars
            # Record the marker-index boundaries for each marker category so
            # that a match index can be mapped back to its category.
            containsGrammarIndexLimit = 2 + len(v.get("contains", []))
            nextGrammarIndexLimit = containsGrammarIndexLimit + len(v.get("next", []))
            errorIndexLimit = nextGrammarIndexLimit + len(v.get("errors", []))
            keywordIndexLimit = errorIndexLimit + len(v.get("keywords", []))
            typeIndexLimit = keywordIndexLimit + len(v.get("types", []))
            specialIndexLimit = typeIndexLimit + len(v.get("special", []))
            v["indexLimits"] = (
                containsGrammarIndexLimit,
                nextGrammarIndexLimit,
                errorIndexLimit,
                keywordIndexLimit,
                typeIndexLimit,
                specialIndexLimit,
            )
        # Reset the re.cache for user regexes.
        re.purge()

    def __set_up_file_types(self, defaultFileTypes):
        """Build the name->type and extension->type lookup tables."""
        self.nameToType = {}
        self.extensions = {}
        fileTypes = {}
        for k, v in defaultFileTypes.items():
            for name in v.get("name", []):
                self.nameToType[name] = v.get("grammar")
            for ext in v["ext"]:
                self.extensions[ext] = v.get("grammar")
            fileTypes[k] = v
        if 0:  # Debug dump of the computed tables.
            app.log.info("extensions")
            # Fix: the original iterated an undefined global |extensions|;
            # the table just built is self.extensions.
            for k, v in self.extensions.items():
                app.log.info("  ", k, ":", v)
            app.log.info("fileTypes")
            for k, v in fileTypes.items():
                app.log.info("  ", k, ":", v)
| {
"content_hash": "ba96656c3baf8498db46ccc6bc370296",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 87,
"avg_line_length": 40.0210970464135,
"alnum_prop": 0.5340010542962572,
"repo_name": "google/ci_edit",
"id": "39aac1f034f82771a57e068f74051b36cdf13c86",
"size": "10061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/prefs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "865502"
},
{
"name": "Rust",
"bytes": "9249"
},
{
"name": "Shell",
"bytes": "5986"
}
],
"symlink_target": ""
} |
# Sphinx documentation build configuration for the "questionnaire" project.
# Generated by sphinx-quickstart; commented-out settings show the defaults.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# NOTE(review): "ChangeMyName" / "ChangeToMyName" below are quickstart
# placeholders that were never filled in with the real author name.
project = u'questionnaire'
copyright = u'2013, ChangeMyName'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'questionnairedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'questionnaire.tex', u'questionnaire Documentation',
   u'ChangeToMyName', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'questionnaire', u'questionnaire Documentation',
     [u'ChangeToMyName'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'questionnaire', u'questionnaire Documentation',
   u'ChangeToMyName', 'questionnaire', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "5e6c8c450d7b3b251cdd98fce9b5ef96",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 32.157205240174676,
"alnum_prop": 0.7038294405214557,
"repo_name": "vadosl/questionnaire",
"id": "1bd24a4df4b690774f3e37f21eeab874e7b1ddf3",
"size": "7788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "33631"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.contrib.auth.models import User
from django.test.client import Client, RequestFactory
from django.http import HttpResponse
from .forms import QueryForm, QueryModelForm, Field
from .operators import AND, OR, BaseOperator
from .decorators import validate
# Reusable optional fields shared by the throwaway Form classes defined
# inside the test methods below.
A = Field(required=False)
B = Field(required=False)
C = Field(required=False)
D = Field(required=False)
E = Field(required=False)
F = Field(required=False)
class QueryModelFormTestCase(TestCase):
    """Tests for QueryModelForm used together with the validate() decorator.

    NOTE(review): response.content is bytes on Python 3, so the str
    comparisons below assume Python 2 — confirm the target version.
    """

    def test_validate_decorator(self):
        # Form bound to the auth User model; per extralogic, username and
        # email must be supplied together when either is present.
        class Form(QueryModelForm):
            class Meta:
                model = User
                fields = ('username', 'email')
                extralogic = [AND('username', 'email'),]
        def view(request, pk):
            return HttpResponse('success')
        def another_view(request, id):
            return HttpResponse('success')
        user = User.objects.create(username='user', email='user@email')
        factory = RequestFactory()
        decorator = validate(Form)
        # 'username' alone violates the AND logic: view is not reached.
        request = factory.get('', {'username':1})
        response = decorator(view)(request, pk=user.pk)
        self.assertNotEqual(response.content, 'success')
        self.assertEqual(request.form.instance, user)
        # Both fields supplied: validation passes and the view runs.
        request = factory.get('', {'username':'user', 'email':'1@email.com'})
        response = decorator(view)(request, pk=user.pk)
        self.assertEqual(response.content, 'success')
        self.assertEqual(request.form.instance, user)
        request = factory.get('')
        response = decorator(view)(request, pk=user.pk)
        self.assertNotEqual(response.content, 'success')
        self.assertEqual(request.form.instance, user)
        # add_instance_using selects which URL kwarg identifies the instance.
        decorator = validate(Form, add_instance_using='id')
        request = factory.get('', {})
        response = decorator(another_view)(request, id=user.pk)
        self.assertNotEqual(response.content, 'success')
        self.assertEqual(request.form.instance, user)
        # Passing pk instead of id: no instance lookup is possible.
        decorator = validate(Form, add_instance_using='id')
        request = factory.get('', {'username':1})
        response = decorator(another_view)(request, pk=user.pk)
        self.assertNotEqual(response.content, 'success')
        self.assertNotEqual(request.form.instance, user)
class QueryFormTestCase(TestCase):
    """Tests for QueryForm: geo fields, lookups, defaults and extralogic.

    The geo-field tests bail out early (return) when the optional GIS form
    fields cannot be imported, so they only run with GeoDjango available.
    """

    def test_BoundingBoxField(self):
        # Two corner points build a 5-point (closed) polygon.
        try:
            from .forms import BoundingBoxField
        except ImportError:
            return
        class Form(QueryForm):
            box = BoundingBoxField()
        lat0, lng0 = 40.0, -8.1
        lat1, lng1 = 41.0, -9.1
        f = Form({
            'box_0': '%s,%s'%(lng0,lat0),
            'box_1': '%s,%s'%(lng1,lat1),
        })
        self.assertTrue(f.is_valid())
        self.assertTrue('box' in f.cleaned_data)
        polygon1 = f.cleaned_data['box']
        self.assertEqual(polygon1.num_points, 5)
        self.assertEqual(polygon1, f.parameters['box'])
        # latitude_first flips input order; a custom lookup renames the
        # parameter key. The resulting polygon must be identical.
        class Form(QueryForm):
            box = BoundingBoxField(latitude_first=True)
            class Meta:
                lookups = {
                    'box': 'point__contained'
                }
        f = Form({
            'box_0': '%s,%s'%(lat0,lng0),
            'box_1': '%s,%s'%(lat1,lng1),
        })
        self.assertTrue(f.is_valid())
        self.assertTrue('box' in f.cleaned_data)
        polygon2 = f.cleaned_data['box']
        self.assertEqual(polygon2.num_points, 5)
        self.assertEqual(polygon2, f.parameters['point__contained'])
        self.assertEqual(polygon1, polygon2)

    def test_CoordsField(self):
        # A single "x,y" string becomes a Point; order depends on
        # latitude_first.
        try:
            from .forms import CoordsField
        except ImportError:
            return
        lat, lng = 40.0, -8.1
        def assert_form(lat, lng, form):
            self.assertTrue(form.is_valid())
            self.assertTrue('coords' in form.cleaned_data)
            coords = form.cleaned_data['coords'].coords
            self.assertEqual(coords[0], lng)
            self.assertEqual(coords[1], lat)
            self.assertEqual(form.cleaned_data['coords'], form.parameters['coords'])
        class Form(QueryForm):
            coords = CoordsField()
        f = Form({'coords': '%s,%s'%(lng,lat)})
        assert_form(lat=lat, lng=lng, form=f)
        class Form(QueryForm):
            coords = CoordsField(latitude_first=True)
        f = Form({'coords': '%s,%s'%(lat,lng)})
        assert_form(lat=lat, lng=lng, form=f)

    def test_CircleField(self):
        # CircleField cleans to a (Point, radius) tuple.
        try:
            from .forms import CircleField
        except ImportError:
            return
        class Form(QueryForm):
            center = CircleField()
        lat, lng = 40.0, -8.1
        f = Form({'center': '%s,%s'%(lng,lat)})
        self.assertTrue(f.is_valid())
        self.assertTrue('center' in f.cleaned_data)
        self.assertTrue(isinstance(f.cleaned_data['center'], tuple))
        coords = f.cleaned_data['center'][0].coords
        self.assertEqual(coords[0], lng)
        self.assertEqual(coords[1], lat)
        self.assertEqual(f.cleaned_data['center'], f.parameters['center'])

    def test_validate_decorator(self):
        # The wrapped view only runs when the form's extralogic is satisfied
        # (or the querystring is empty); functools-style name is preserved.
        # NOTE(review): response.content comparisons assume Python 2 str.
        class Form(QueryForm):
            a, b = A, B
            class Meta:
                extralogic = [AND('a', 'b'),]
        def view(request):
            return HttpResponse('success')
        factory = RequestFactory()
        decorator = validate(Form)
        request = factory.get('', {'a':1})
        wrapper = decorator(view)
        self.assertEqual(wrapper.__name__, view.__name__)
        self.assertNotEqual(wrapper(request).content, 'success')
        request = factory.get('', {'a':1, 'b':1})
        self.assertEqual(wrapper(request).content, 'success')
        request = factory.get('')
        self.assertEqual(wrapper(request).content, 'success')

    def test_lookups(self):
        # Fields without an explicit lookup fall back to their own name.
        class Form(QueryForm):
            a, b, c = A, B, C
            class Meta:
                lookups = {
                    'a': 'a__contains',
                    'b': 'b__id',
                }
        f = Form({'a': 1, 'b':1, 'c':1})
        lookups = f._meta.lookups.values()
        self.assertEqual(len(lookups), len(f.fields))
        self.assertTrue('a__contains' in lookups)
        self.assertEqual(f._meta.lookups['a'], 'a__contains')
        self.assertTrue('b__id' in lookups)
        self.assertEqual(f._meta.lookups['b'], 'b__id')
        self.assertTrue('c' in lookups)
        self.assertEqual(f._meta.lookups['c'], 'c')
        self.assertFalse('c__year' in lookups)

    def test_multifield_lookups(self):
        # A multifield lookup combines several cleaned values into one
        # query parameter.
        class Form(QueryForm):
            a, b = A, B
            class Meta:
                multifield_lookups = {
                    ('a', 'b'): lambda a,b: {'a__range': (a-b, a)}
                }
        f = Form({'a':2, 'b':1})
        lookups = f._meta.multifield_lookups
        self.assertTrue(('a', 'b') in lookups)
        self.assertTrue(f.is_valid())
        self.assertTrue('a__range' in f.parameters)
        self.assertTrue('a' in f._validated_data)
        self.assertTrue('b' in f._validated_data)
        self.assertEqual(f.parameters['a__range'], (1, 2))

    def test_parameters(self):
        # parameters maps each present cleaned value through its lookup;
        # absent values produce no parameter.
        class Form(QueryForm):
            a, b, c = A, B, C
            class Meta:
                lookups = {
                    'a': 'a__contains',
                    'b': 'b__id',
                    'c': 'c__year',
                }
        f = Form({'a': 1, 'b':1, 'c':1})
        self.assertTrue(f.is_valid())
        for fieldname,lookup in f._meta.lookups.items():
            self.assertTrue(f.parameters.get(lookup, None))
            self.assertEqual(f.cleaned_data[fieldname], f.parameters[lookup])
        f = Form({'a': 1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a__contains' in f.parameters)
        self.assertTrue('a' in f.cleaned_data)
        self.assertFalse('b__id' in f.parameters)
        self.assertTrue('b' in f.cleaned_data)

    def test_default_data(self):
        # A field initial acts as the submitted value when absent ...
        class Form(QueryForm):
            a = Field(required=False, initial=1)
        f = Form({})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f._validated_data)
        self.assertTrue('a' in f.parameters)
        self.assertEqual(f._validated_data['a'], f.fields['a'].initial)
        self.assertEqual(f._validated_data['a'], f.parameters['a'])
        f = Form({'a': 2})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f._validated_data)
        self.assertTrue('a' in f.parameters)
        self.assertNotEqual(f._validated_data['a'], f.fields['a'].initial)
        self.assertEqual(f._validated_data['a'], f.parameters['a'])
        # ... unless extralogic would be left unsatisfied by the default.
        class Form(QueryForm):
            a = Field(required=False, initial=1)
            b = Field(required=False)
            class Meta:
                extralogic = [AND('a', 'b')]
        f = Form({})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' not in f._validated_data)
        self.assertTrue('a' not in f.parameters)
        # self.assertEqual(f._validated_data['a'], f.fields['a'].initial)

    def test_no_defaults(self):
        # Meta.no_defaults disables the initial-as-value behavior entirely.
        class Form(QueryForm):
            a = Field(required=False, initial=1)
            class Meta:
                no_defaults = True
        f = Form({})
        self.assertTrue(f.is_valid())
        self.assertFalse('a' in f._validated_data)
        self.assertFalse('a' in f.parameters)

    def test_required(self):
        # Meta.required forces presence of the listed fields.
        class Form(QueryForm):
            a = Field()
            class Meta:
                required = ['a']
        f = Form()
        self.assertFalse(f.is_valid())
        f = Form({'a': 1})
        self.assertTrue(f.is_valid())

    def test_ignore(self):
        # Ignored fields are validated but excluded from parameters.
        class Form(QueryForm):
            a, b = A, B
            class Meta:
                ignore = ['a']
        f = Form({'a': 1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f._validated_data)
        self.assertFalse('a' in f.parameters)
        f = Form({'b': 1})
        self.assertTrue(f.is_valid())
        self.assertTrue('b' in f._validated_data)
        self.assertTrue('b' in f.parameters)

    def test_is_valid_with_or(self):
        # OR keeps only the first present operand in parameters.
        class Form(QueryForm):
            a, b, c = A, B, C
            class Meta:
                extralogic = [
                    OR('a', 'b', 'c'),
                ]
        f = Form({})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' not in f.parameters)
        self.assertTrue('b' not in f.parameters)
        self.assertTrue('c' not in f.parameters)
        f = Form({'a':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertTrue('b' not in f.parameters)
        self.assertTrue('c' not in f.parameters)
        f = Form({'b':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' not in f.parameters)
        self.assertTrue('b' in f.parameters)
        self.assertTrue('c' not in f.parameters)
        f = Form({'c':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' not in f.parameters)
        self.assertTrue('b' not in f.parameters)
        self.assertTrue('c' in f.parameters)
        f = Form({'a':1, 'b':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertTrue('b' not in f.parameters)
        self.assertTrue('c' not in f.parameters)
        f = Form({'a':1, 'b':1, 'c':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertTrue('b' not in f.parameters)
        self.assertTrue('c' not in f.parameters)
        # required=True makes an all-empty OR invalid.
        class Form(QueryForm):
            a, b = A, B
            class Meta:
                extralogic = [
                    OR('a', 'b', required=True),
                ]
        f = Form({})
        self.assertFalse(f.is_valid())
        self.assertTrue('a' not in f.parameters)
        self.assertTrue('b' not in f.parameters)
        f = Form({'a':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertTrue('b' not in f.parameters)
        f = Form({'b':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' not in f.parameters)
        self.assertTrue('b' in f.parameters)
        f = Form({'a':1, 'b':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertTrue('b' not in f.parameters)

    def test_is_valid_with_and(self):
        # AND is all-or-nothing: valid when none or all operands are present.
        class Form(QueryForm):
            a, b, c = A, B, C
            class Meta:
                extralogic = [
                    AND('a', 'b', 'c'),
                ]
        f = Form({})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' not in f.parameters)
        self.assertTrue('b' not in f.parameters)
        self.assertTrue('c' not in f.parameters)
        f = Form({'a':1})
        self.assertFalse(f.is_valid())
        f = Form({'a':1, 'b':1})
        self.assertFalse(f.is_valid())
        f = Form({'a':1, 'b':1, 'c':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertTrue('b' in f.parameters)
        self.assertTrue('c' in f.parameters)
        # Field initials do not satisfy an AND on their own.
        class Form(QueryForm):
            a = Field(required=False, initial=1)
            b = Field(required=False)
            class Meta:
                extralogic = [
                    AND('a', 'b'),
                ]
        f = Form({})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' not in f.parameters)
        self.assertTrue('b' not in f.parameters)
        f = Form({'a':1})
        self.assertFalse(f.is_valid())
        f = Form({'a':2})
        self.assertFalse(f.is_valid())
        f = Form({'a':1, 'b':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertTrue('b' in f.parameters)
        class Form(QueryForm):
            a = Field(required=False, initial=1)
            b = Field(required=False)
            c = Field(required=False)
            class Meta:
                extralogic = [
                    AND('a', 'b', 'c'),
                ]
        f = Form({'b':1})
        self.assertFalse(f.is_valid())
        f = Form({'a':1, 'b':1})
        self.assertFalse(f.is_valid())
        f = Form({'a':2, 'b':1})
        self.assertFalse(f.is_valid())
        # A required field plus an initial on its AND partner: the initial
        # fills in the partner once the required field is supplied.
        class Form(QueryForm):
            a = Field(required=True)
            b = Field(required=False, initial=1)
            class Meta:
                extralogic = [
                    AND('a', 'b'),
                ]
        f = Form({})
        self.assertFalse(f.is_valid())
        f = Form({'a':1})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertEqual(f.parameters['a'], 1)
        self.assertTrue('b' in f.parameters)
        self.assertEqual(f.parameters['b'], 1)
        f = Form({'a':1, 'b':2})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertEqual(f.parameters['a'], 1)
        self.assertTrue('b' in f.parameters)
        self.assertEqual(f.parameters['b'], 2)
        f = Form({'b':1})
        self.assertFalse(f.is_valid())
        f = Form({'b':2})
        self.assertFalse(f.is_valid())

    def test_nexted_logic(self):
        # NOTE(review): "nexted" looks like a typo for "nested"; renaming
        # would change the test ID, so it is only flagged here.
        class Form(QueryForm):
            a, b, c = A, B, C
            class Meta:
                extralogic = [
                    AND('a', OR('b', 'c')),
                ]
        f = Form({})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' not in f.parameters)
        self.assertTrue('b' not in f.parameters)
        self.assertTrue('c' not in f.parameters)
        f = Form({'a':1})
        self.assertFalse(f.is_valid())
        f = Form({'a':1, 'b':2})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertTrue('b' in f.parameters)
        self.assertTrue('c' not in f.parameters)
        f = Form({'a':1, 'c':2})
        self.assertTrue(f.is_valid())
        self.assertTrue('a' in f.parameters)
        self.assertTrue('b' not in f.parameters)
        self.assertTrue('c' in f.parameters)

    def test_extralogic(self):
        # Structural checks on how Meta.extralogic is parsed into operator
        # trees with Field leaves.
        class Form(QueryForm):
            a, b, c, d, e, f = A, B, C, D, E, F
            class Meta:
                extralogic = [
                    AND('b', 'c', 'd'),
                    OR('e', 'a', 'f'),
                    AND('c', OR('d', 'e'), 'a'),
                    OR(AND('b', 'c', 'd'), 'e'),
                ]
        form = Form()
        logic = form._meta.extralogic
        assert len(logic) == 4
        assert isinstance(logic[0], AND)
        assert isinstance(logic[0].operands[0], Field)
        assert isinstance(logic[0].operands[1], Field)
        assert isinstance(logic[0].operands[2], Field)
        assert str(logic[0]) == "( b AND c AND d )"
        assert len(logic[0]) == 3
        assert isinstance(logic[1], OR)
        assert isinstance(logic[1].operands[0], Field)
        assert isinstance(logic[1].operands[1], Field)
        assert isinstance(logic[1].operands[2], Field)
        assert str(logic[1]) == "( e OR a OR f )"
        assert len(logic[1]) == 3
        assert isinstance(logic[2], AND)
        assert isinstance(logic[2].operands[0], Field)
        assert isinstance(logic[2].operands[1], OR)
        assert isinstance(logic[2].operands[2], Field)
        assert isinstance(logic[2].operands[1].operands[0], Field)
        assert isinstance(logic[2].operands[1].operands[1], Field)
        assert str(logic[2]) == "( c AND ( d OR e ) AND a )", str(logic[2])
        assert len(logic[2]) == 4
        assert isinstance(logic[3], OR)
        assert isinstance(logic[3].operands[0], AND)
        assert isinstance(logic[3].operands[1], Field)
        assert isinstance(logic[3].operands[0].operands[0], Field)
        assert isinstance(logic[3].operands[0].operands[1], Field)
        assert isinstance(logic[3].operands[0].operands[2], Field)
        assert str(logic[3]) == "( ( b AND c AND d ) OR e )"
        assert len(logic[3]) == 4
| {
"content_hash": "e6eb72e8f94123d8237576a6623abf1a",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 84,
"avg_line_length": 34.39890710382514,
"alnum_prop": 0.51289383108287,
"repo_name": "laginha/django-alo-forms",
"id": "9504b13dc3c9ac5ff53f089138b59cdae85e214f",
"size": "18925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/alo/tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41407"
}
],
"symlink_target": ""
} |
from model.new_user_data import N_u_d
import random
def test_modify_contact(app, db, check_ui):
    """Edit a random contact and verify the change in the DB (and UI).

    Fixtures: |app| drives the application, |db| queries it directly,
    |check_ui| toggles the slower UI cross-check.
    """
    # Ensure there is at least one contact to modify.
    if len(db.get_contact_list()) == 0:
        app.contact.Create()
        app.contact.Fill_in(N_u_d(namef="Rus", namem="Si", namel="An"))
    old_contacts = db.get_contact_list()
    contact = random.choice(old_contacts)
    # Replacement data; day_1/month_2 hold the XPaths of the date dropdown
    # options. Keeps the original contact's id so sorting stays stable.
    contact_info = N_u_d(namel= "Tree", namef= "Oak", namem="Great", nick="Alexander", title="Super boss", firm="0",
                         addr="Moscow", phone_h="0", phone_m="0", phone_work="0", phone_fax="0", email_2="0@vz.vz",
                         day_1 = "//div[@id='content']/form[1]/select[1]//option[9]",
                         month_2 = "//div[@id='content']/form[1]/select[4]//option[10]", address_2="0", phone_h2="0", id=contact.id)
    app.contact.modify_by_id(contact.id, contact_info)
    # NOTE(review): old list comes from |db| but the new one from
    # |app.contact| — presumably both reflect the same store; confirm.
    new_contacts = app.contact.get_contact_list()
    # Build the expected list: the edited contact replaced in place.
    old_contacts.remove(contact)
    old_contacts.append(contact_info)
    assert sorted(old_contacts, key=N_u_d.id_or_max) == sorted(new_contacts, key=N_u_d.id_or_max)
    if check_ui:
        assert sorted(new_contacts, key=N_u_d.id_or_max) == sorted(app.contact.get_contact_list(), key=N_u_d.id_or_max)
| {
"content_hash": "e1a423b18dc55f7705cc05e4af7b9a42",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 127,
"avg_line_length": 46.96,
"alnum_prop": 0.6115843270868825,
"repo_name": "SCLT1975/python_training",
"id": "caff21b64af51290e588143ec7657baddf1be28c",
"size": "1199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_modify_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1233"
},
{
"name": "C",
"bytes": "409362"
},
{
"name": "C++",
"bytes": "129981"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "29810"
},
{
"name": "Tcl",
"bytes": "1285363"
}
],
"symlink_target": ""
} |
import re
import configparser
import csv
import os
from os import path
from config_util import config_read
def fetch_content(arg_file):
    """Return the full text of content file |arg_file|.

    Looks for a file of exactly that name in the configured contents
    directory and returns its text; returns None when no such file exists.
    """
    conf = config_read()
    contents_path = conf["contents"]
    for file_name in os.listdir(contents_path):
        if file_name == arg_file:
            # read() is equivalent to the original line-by-line
            # accumulate-and-join, with one fewer pass.
            with open(path.join(contents_path, file_name), encoding='utf-8') as f:
                return f.read()
    # Note: the original also removed unused locals |title| and |post_title|;
    # |post_title| was built with rstrip('\.txt'), which strips a *character
    # set*, not the '.txt' suffix, and was never used anyway.
    return None
def fetch_image(arg_file):
    """Return the full path of the image file named *arg_file*.

    Returns None (implicitly) when the configured images directory holds
    no entry with that exact name.
    """
    conf = config_read()
    images_dir = conf["images"]
    # A membership test on the directory listing is equivalent to the
    # original per-entry comparison loop.
    if arg_file in os.listdir(images_dir):
        return path.join(images_dir, arg_file)
| {
"content_hash": "5bea20c62c8843d7aa1163f91c7197ab",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 73,
"avg_line_length": 24.75,
"alnum_prop": 0.5808080808080808,
"repo_name": "shinshin86/APWP",
"id": "2986bf01696ca43390722d7764a3c698807728f0",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/post.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7931"
}
],
"symlink_target": ""
} |
from m5.params import *
from m5.objects import *
from BaseTopology import SimpleTopology
class Mesh(SimpleTopology):
    """A simple 2-D mesh: routers in a num_rows x num_columns grid, with
    controllers spread uniformly across the routers and any leftover
    (DMA) controllers attached to router 0."""
    description = 'Mesh'

    def __init__(self, controllers):
        self.nodes = controllers

    # Makes a generic mesh assuming an equal number of cache and directory cntrls
    def makeTopology(self, options, network, IntLink, ExtLink, Router):
        """Populate *network* with routers, external and internal links."""
        ctrls = self.nodes
        num_routers = options.num_cpus
        num_rows = options.mesh_rows

        # Controllers must divide evenly over the routers; leftover ones are
        # handled separately below.  Rows cannot exceed the router count.
        per_router, leftover = divmod(len(ctrls), num_routers)
        assert num_rows <= num_routers
        num_columns = int(num_routers / num_rows)
        assert num_columns * num_rows == num_routers

        routers = [Router(router_id=rid) for rid in range(num_routers)]
        network.routers = routers

        # Running counter so every link gets a unique id.
        link_id = 0

        # Split the controllers: the bulk is distributed uniformly, the
        # remainder (expected to be DMA controllers) goes to router 0.
        split = len(ctrls) - leftover
        uniform_ctrls = [ctrls[k] for k in range(split)]
        leftover_ctrls = [ctrls[k] for k in range(split, len(ctrls))]

        ext_links = []
        for idx, ctrl in enumerate(uniform_ctrls):
            level, rid = divmod(idx, num_routers)
            assert level < per_router
            ext_links.append(ExtLink(link_id=link_id, ext_node=ctrl,
                                     int_node=routers[rid]))
            link_id += 1

        for idx, ctrl in enumerate(leftover_ctrls):
            assert ctrl.type == 'DMA_Controller'
            assert idx < leftover
            ext_links.append(ExtLink(link_id=link_id, ext_node=ctrl,
                                     int_node=routers[0]))
            link_id += 1
        network.ext_links = ext_links

        int_links = []
        # East-west links along each row.
        for row in range(num_rows):
            for col in range(num_columns - 1):
                east_id = col + (row * num_columns)
                west_id = (col + 1) + (row * num_columns)
                int_links.append(IntLink(link_id=link_id,
                                         node_a=routers[east_id],
                                         node_b=routers[west_id],
                                         weight=1, latency=0))
                link_id += 1
        # North-south links along each column.
        for col in range(num_columns):
            for row in range(num_rows - 1):
                north_id = col + (row * num_columns)
                south_id = col + ((row + 1) * num_columns)
                int_links.append(IntLink(link_id=link_id,
                                         node_a=routers[north_id],
                                         node_b=routers[south_id],
                                         weight=1, latency=0))
                link_id += 1
        network.int_links = int_links
| {
"content_hash": "3383d5a67b4d0e0364cc1fe809631b1c",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 81,
"avg_line_length": 39.98876404494382,
"alnum_prop": 0.5268333801629671,
"repo_name": "wnoc-drexel/gem5-stable",
"id": "e715af8557c6aca7dbe6dd0e05fd6ba758fd040c",
"size": "5121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configs/topologies/Mesh.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "239800"
},
{
"name": "C",
"bytes": "957228"
},
{
"name": "C++",
"bytes": "13915041"
},
{
"name": "CSS",
"bytes": "9813"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Groff",
"bytes": "11130043"
},
{
"name": "HTML",
"bytes": "132838214"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Makefile",
"bytes": "20709"
},
{
"name": "PHP",
"bytes": "10107"
},
{
"name": "Perl",
"bytes": "36183"
},
{
"name": "Protocol Buffer",
"bytes": "3246"
},
{
"name": "Python",
"bytes": "3739380"
},
{
"name": "Shell",
"bytes": "49333"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
} |
"""
Model of discount, needed when client pays for purchase.
"""
from mgnemu.models.base_model import BaseModel
from mgnemu.models.sales_types import DISCOUNT
class Discount(BaseModel):
    """Model of a discount, applied when a client pays for a purchase.

    Built from a mapping with a required 'sum' key (discount amount) and
    an optional 'prc' key (discount percentage, defaults to 0).
    """
    def __init__(self, data):
        """Initialize from *data*.

        Raises:
            KeyError: if 'sum' is missing (same as the original behaviour).
        """
        BaseModel.__init__(self, DISCOUNT)
        self.__sum = data['sum']
        # data.get replaces the old "'prc' in list(data.keys())" check;
        # same result, one lookup, no intermediate list.
        self.__prc = data.get('prc', 0)
    @property
    def sum(self):
        """Discount amount."""
        return self.__sum
    @property
    def prc(self):
        """Discount percentage (0 when not supplied)."""
        return self.__prc
    def dumps(self):
        """Serialize as {model_type: {'sum': ..., 'prc': ...}}."""
        return {
            self.model_type: {
                'sum': self.__sum,
                'prc': self.__prc
            }
        }
| {
"content_hash": "a5dc57968b63682c162b8b93038b4e14",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 56,
"avg_line_length": 20.5,
"alnum_prop": 0.5164992826398852,
"repo_name": "0xporky/mgnemu-python",
"id": "96b9309aa2fa1cb144b03976f08a6b76a8609457",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mgnemu/models/discount.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26969"
}
],
"symlink_target": ""
} |
__author__ = 'wdwind'
from os import walk
from SingleFile.ClusterCenter import cluster_rank_rank
def wrapper(data_path, cluster_path, index_path, output_path):
"""
A wrapper for the ClusterCenter.cluster_rank_rank.
Clustering results post processing.
Do double rank
- rank by the number of points in the clusters
- rank by the distances between every point in a cluster and the cluster center
@param data_path:
@param cluster_path:
@param index_path:
@param output_path:
"""
files = []
for (dir_path, dir_names, file_names) in walk(data_path):
files.extend(file_names)
break
for filename in files:
data_file = data_path + "\\" + filename
cluster_file = cluster_path + "\\" + filename[:len(filename) - 4] + ".clusters"
index_file = index_path + "\\" + filename[:len(filename) - 4] + ".index"
output_file = output_path + "\\" + filename[:len(filename) - 4] + ".out"
print data_path + "\\" + filename
print cluster_path + "\\" + filename[:len(filename) - 4] + ".clusters"
print index_path + "\\" + filename[:len(filename) - 4] + ".index"
print output_path + "\\" + filename[:len(filename) - 4] + ".out"
cluster_rank_rank(data_file, cluster_file, index_file, output_file)
# Script entry point: run the double-rank post-processing over a fixed,
# user-specific data set (Windows paths; the u-prefixes are needed for the
# non-ASCII folder name under Python 2).
if __name__ == '__main__':
    wrapper(u"C:\\Users\\wdwind\\SkyDrive\\文档\\clustering008\\CSV",
            u"C:\\Users\\wdwind\\SkyDrive\\文档\\clustering008\\results_C_clustering",
            u"C:\\Users\\wdwind\\SkyDrive\\文档\\clustering008\\index",
            u"C:\\Users\\wdwind\\SkyDrive\\文档\\clustering008\\Ranked_Clusters_Cut")
| {
"content_hash": "74537c3f3b6af8bee38ad6f4e0f54520",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 87,
"avg_line_length": 37,
"alnum_prop": 0.6096096096096096,
"repo_name": "wdwind/ImageTrends",
"id": "83ea335b94f11049a2fe88c605fcc50470ca175d",
"size": "1707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/PyClustering/ClusterCenterWrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7139"
},
{
"name": "Java",
"bytes": "98785"
},
{
"name": "PHP",
"bytes": "27675"
},
{
"name": "PigLatin",
"bytes": "1298"
},
{
"name": "Python",
"bytes": "22257"
}
],
"symlink_target": ""
} |
import argparse
import cv2
import mmcv
from mmdet.apis import inference_detector, init_detector
def parse_args():
    """Build and parse the command-line arguments for the video demo."""
    ap = argparse.ArgumentParser(description='MMDetection video demo')
    # Positional arguments (order matters for parsing).
    ap.add_argument('video', help='Video file')
    ap.add_argument('config', help='Config file')
    ap.add_argument('checkpoint', help='Checkpoint file')
    # Optional flags.
    ap.add_argument('--device', default='cuda:0',
                    help='Device used for inference')
    ap.add_argument('--score-thr', type=float, default=0.3,
                    help='Bbox score threshold')
    ap.add_argument('--out', type=str, help='Output video file')
    ap.add_argument('--show', action='store_true', help='Show video')
    ap.add_argument('--wait-time', type=float, default=1,
                    help='The interval of show (s), 0 is block')
    return ap.parse_args()
def main():
    """Run the detector over every frame of the input video, optionally
    displaying the annotated frames and/or writing them to a new video."""
    args = parse_args()
    assert args.out or args.show, \
        ('Please specify at least one operation (save/show the '
         'video) with the argument "--out" or "--show"')
    model = init_detector(args.config, args.checkpoint, device=args.device)
    reader = mmcv.VideoReader(args.video)
    # Only open a writer when an output file was requested.
    writer = None
    if args.out:
        writer = cv2.VideoWriter(
            args.out, cv2.VideoWriter_fourcc(*'mp4v'), reader.fps,
            (reader.width, reader.height))
    for frame in mmcv.track_iter_progress(reader):
        detections = inference_detector(model, frame)
        frame = model.show_result(frame, detections, score_thr=args.score_thr)
        if args.show:
            cv2.namedWindow('video', 0)
            mmcv.imshow(frame, 'video', args.wait_time)
        if args.out:
            writer.write(frame)
    if writer:
        writer.release()
    cv2.destroyAllWindows()
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
| {
"content_hash": "e84b868c33b24a866c11c9ed33ade559",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 76,
"avg_line_length": 32.1,
"alnum_prop": 0.6334371754932503,
"repo_name": "open-mmlab/mmdetection",
"id": "4ee1fa675163a0fb8b407f1cff67fab23feb65bc",
"size": "1974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/video_demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "4811377"
},
{
"name": "Shell",
"bytes": "47911"
}
],
"symlink_target": ""
} |
import cStringIO
import os
import sys
import traceback
import py
from rpython.flowspace.model import (FunctionGraph, Constant, Variable)
from rpython.rlib import rstackovf
from rpython.rlib.objectmodel import (ComputedIntSymbolic, CDefinedIntSymbolic,
Symbolic)
# intmask is used in an exec'd code block
from rpython.rlib.rarithmetic import (ovfcheck, is_valid_int, intmask,
r_uint, r_longlong, r_ulonglong, r_longlonglong)
from rpython.rtyper.lltypesystem import lltype, llmemory, lloperation, llheap
from rpython.rtyper import rclass
log = py.log.Producer('llinterp')
class LLException(Exception):
    """Exception raised by the interpreted low-level program.

    args[0] is the ll exception type, args[1] the ll exception value;
    an optional args[2] carries the original Python (type, value, tb)
    triple, shown indented in the repr for debugging.
    """
    def __init__(self, *args):
        "NOT_RPYTHON"
        Exception.__init__(self, *args)
    def __str__(self):
        etype = self.args[0]
        # self.args[1] is the ll exception value; not shown here.
        if len(self.args) <= 2:
            extra = ''
        else:
            # Render the captured Python traceback, indented so it reads
            # as attached to this LLException line.
            buf = cStringIO.StringIO()
            orig_type, orig_value, orig_tb = self.args[2]
            traceback.print_exception(orig_type, orig_value, orig_tb,
                                      file=buf)
            extra = '\n' + buf.getvalue().rstrip('\n')
            extra = extra.replace('\n', '\n | ') + '\n `------'
        return '<LLException %r%s>' % (type_name(etype), extra)
class LLFatalError(Exception):
    """Unrecoverable interpreter error; rendered as 'arg1: arg2: ...'."""
    def __str__(self):
        return ': '.join(str(part) for part in self.args)
def type_name(etype):
    """Return the name of a low-level exception type as a plain string.

    *etype* is expected to expose a ``name.chars`` sequence of characters
    (the ll representation of the class name).
    """
    chars = etype.name.chars
    return ''.join(chars)
class LLInterpreter(object):
    """ low level interpreter working with concrete values. """
    # Class-level handle to the interpreter currently executing a graph;
    # saved/restored around nested eval_graph calls.
    current_interpreter = None
    def __init__(self, typer, tracing=True, exc_data_ptr=None):
        """Create an interpreter bound to *typer*.

        tracing: when true, a Tracer is attached and every operation is
            dumped to its log.
        exc_data_ptr: exception-data structure used by exception-transformed
            graphs (see get_transformed_exc_data).
        """
        self.bindings = {}
        self.typer = typer
        # 'heap' is module or object that provides malloc, etc for lltype ops
        self.heap = llheap
        self.exc_data_ptr = exc_data_ptr
        self.frame_stack = []        # stack of LLFrame objects being executed
        self.tracer = None
        self.frame_class = LLFrame   # subclasses may plug in their own frame
        if tracing:
            self.tracer = Tracer()
    def eval_graph(self, graph, args=(), recursive=False):
        """Interpret *graph* with the given concrete *args* and return its
        result.  On failure, logs/prints a traceback and re-raises; the
        frame stack and current_interpreter are always restored."""
        llframe = self.frame_class(graph, args, self)
        if self.tracer and not recursive:
            global tracer1
            tracer1 = self.tracer
            self.tracer.start()
        retval = None
        self.traceback_frames = []
        # Snapshot to assert the stack is balanced after evaluation.
        old_frame_stack = self.frame_stack[:]
        prev_interpreter = LLInterpreter.current_interpreter
        LLInterpreter.current_interpreter = self
        try:
            try:
                retval = llframe.eval()
            except LLException, e:
                log.error("LLEXCEPTION: %s" % (e, ))
                self.print_traceback()
                if self.tracer:
                    self.tracer.dump('LLException: %s\n' % (e,))
                raise
            except Exception, e:
                # Exceptions explicitly flagged by the JIT machinery are
                # passed through untouched.
                if getattr(e, '_go_through_llinterp_uncaught_', False):
                    raise
                log.error("AN ERROR OCCURED: %s" % (e, ))
                self.print_traceback()
                if self.tracer:
                    line = str(e)
                    if line:
                        line = ': ' + line
                    line = '* %s' % (e.__class__.__name__,) + line
                    self.tracer.dump(line + '\n')
                raise
        finally:
            LLInterpreter.current_interpreter = prev_interpreter
            assert old_frame_stack == self.frame_stack
            if self.tracer:
                if retval is not None:
                    self.tracer.dump(' ---> %r\n' % (retval,))
                if not recursive:
                    self.tracer.stop()
        return retval
    def print_traceback(self):
        """Log (and optionally trace-dump) the ll-level traceback collected
        in self.traceback_frames, marking the failing operation with 'E'."""
        frames = self.traceback_frames
        frames.reverse()
        self.traceback_frames = []
        lines = []
        for frame in frames:
            logline = frame.graph.name + "()"
            if frame.curr_block is None:
                logline += " <not running yet>"
                lines.append(logline)
                continue
            try:
                logline += " " + self.typer.annotator.annotated[frame.curr_block].func.__module__
            except (KeyError, AttributeError, TypeError):
                logline += " <unknown module>"
            lines.append(logline)
            for i, operation in enumerate(frame.curr_block.operations):
                if i == frame.curr_operation_index:
                    logline = "E %s"
                else:
                    logline = " %s"
                lines.append(logline % (operation, ))
        if self.tracer:
            self.tracer.dump('Traceback\n', bold=True)
            for line in lines:
                self.tracer.dump(line + '\n')
        for line in lines:
            log.traceback(line)
    def get_tlobj(self):
        """Return (lazily creating) the fake thread-local object, including
        a raw fake p_errno used by rffi-level code."""
        try:
            return self._tlobj
        except AttributeError:
            from rpython.rtyper.lltypesystem import rffi
            PERRNO = rffi.CArrayPtr(rffi.INT)
            # Raw, untracked allocation: lives for the whole interpreter run.
            fake_p_errno = lltype.malloc(PERRNO.TO, 1, flavor='raw', zero=True,
                                         track_allocation=False)
            self._tlobj = {'RPY_TLOFS_p_errno': fake_p_errno,
                           #'thread_ident': ...,
                           }
            return self._tlobj
    def find_roots(self, is_minor=False):
        """Return a list of the addresses of the roots."""
        #log.findroots("starting")
        roots = []
        for frame in reversed(self.frame_stack):
            #log.findroots("graph", frame.graph.name)
            frame.find_roots(roots)
            # If a call is done with 'is_minor=True', we can stop after the
            # first frame in the stack that was already seen by the previous
            # call with 'is_minor=True'. (We still need to trace that frame,
            # but not its callers.)
            if is_minor:
                if getattr(frame, '_find_roots_already_seen', False):
                    break
                frame._find_roots_already_seen = True
        return roots
    def find_exception(self, exc):
        """Map an LLException back to the Python exception class whose name
        matches the ll type's name."""
        assert isinstance(exc, LLException)
        klass, inst = exc.args[0], exc.args[1]
        for cls in enumerate_exceptions_top_down():
            if "".join(klass.name.chars) == cls.__name__:
                return cls
        raise ValueError("couldn't match exception, maybe it"
                         " has RPython attributes like OSError?")
    def get_transformed_exc_data(self, graph):
        """Return the exception-data structure for *graph* if it has been
        exception-transformed (or is an rgenop graph), else None."""
        if hasattr(graph, 'exceptiontransformed'):
            return graph.exceptiontransformed
        if getattr(graph, 'rgenop', False):
            return self.exc_data_ptr
        return None
    def _store_exception(self, exc):
        # Hook for ll2ctypes callbacks; must be overridden by the caller.
        raise PleaseOverwriteStoreException("You just invoked ll2ctypes callback without overwriting _store_exception on llinterpreter")
class PleaseOverwriteStoreException(Exception):
    """Raised when an ll2ctypes callback fires before the caller has
    overridden LLInterpreter._store_exception."""
def checkptr(ptr):
    # Sanity helper: the value must be a typed lltype pointer.
    assert isinstance(lltype.typeOf(ptr), lltype.Ptr)
def checkadr(addr):
    # Sanity helper: the value must be a raw llmemory address.
    assert lltype.typeOf(addr) is llmemory.Address
class LLFrame(object):
    def __init__(self, graph, args, llinterpreter):
        """One stack frame of the ll interpreter, executing *graph* with
        concrete *args* under *llinterpreter*."""
        # graph may be None (checked first, so FunctionGraph is only
        # required for real graphs).
        assert not graph or isinstance(graph, FunctionGraph)
        self.graph = graph
        self.args = args
        self.llinterpreter = llinterpreter
        self.heap = llinterpreter.heap
        self.bindings = {}            # Variable -> concrete value
        self.curr_block = None        # block currently being executed
        self.curr_operation_index = 0
        self.alloca_objects = []      # 'stack'-flavor mallocs, freed on exit
    def newsubframe(self, graph, args):
        # Create a frame of the same class for a nested call.
        return self.__class__(graph, args, self.llinterpreter)
# _______________________________________________________
# variable setters/getters helpers
    def clear(self):
        """Drop all variable bindings (called when entering a new block)."""
        self.bindings.clear()
    def fillvars(self, block, values):
        """Bind the block's input variables to the given concrete values."""
        vars = block.inputargs
        assert len(vars) == len(values), (
            "block %s received %d args, expected %d" % (
            block, len(values), len(vars)))
        for var, val in zip(vars, values):
            self.setvar(var, val)
    def setvar(self, var, val):
        """Bind Variable *var* to *val*, coercing it to var's concrete
        type (Void variables are stored as-is)."""
        if var.concretetype is not lltype.Void:
            try:
                val = lltype.enforce(var.concretetype, val)
            except TypeError:
                assert False, "type error: input value of type:\n\n\t%r\n\n===> variable of type:\n\n\t%r\n" % (lltype.typeOf(val), var.concretetype)
        assert isinstance(var, Variable)
        self.bindings[var] = val
    def setifvar(self, var, val):
        # Like setvar, but silently ignores Constants.
        if isinstance(var, Variable):
            self.setvar(var, val)
    def getval(self, varorconst):
        """Return the concrete value of a Variable or Constant, computing
        symbolic ints and coercing to the declared concrete type."""
        try:
            val = varorconst.value          # Constant path
        except AttributeError:
            val = self.bindings[varorconst] # Variable path
        if isinstance(val, ComputedIntSymbolic):
            val = val.compute_fn()
        if varorconst.concretetype is not lltype.Void:
            try:
                val = lltype.enforce(varorconst.concretetype, val)
            except TypeError:
                assert False, "type error: %r val from %r var/const" % (lltype.typeOf(val), varorconst.concretetype)
        return val
# _______________________________________________________
# other helpers
    def getoperationhandler(self, opname):
        """Return the handler for ll operation *opname*: an op_<name>
        method if defined, else the operation's 'fold' implementation,
        which is cached on the class for next time."""
        ophandler = getattr(self, 'op_' + opname, None)
        if ophandler is None:
            # try to import the operation from opimpl.py
            ophandler = lloperation.LL_OPERATIONS[opname].fold
            setattr(self.__class__, 'op_' + opname, staticmethod(ophandler))
        return ophandler
# _______________________________________________________
# evaling functions
    def eval(self):
        """Run the frame's graph block by block until a return block is
        reached; returns the concrete return value.  Keeps the interpreter's
        frame stack balanced and records this frame for tracebacks on error."""
        graph = self.graph
        tracer = self.llinterpreter.tracer
        if tracer:
            tracer.enter(graph)
        self.llinterpreter.frame_stack.append(self)
        try:
            try:
                nextblock = graph.startblock
                args = self.args
                while 1:
                    self.clear()
                    self.fillvars(nextblock, args)
                    nextblock, args = self.eval_block(nextblock)
                    if nextblock is None:
                        # Return block reached: free stack-flavor
                        # allocations, 'args' is the return value.
                        for obj in self.alloca_objects:
                            obj._obj._free()
                        return args
            except Exception:
                self.llinterpreter.traceback_frames.append(self)
                raise
        finally:
            leavingframe = self.llinterpreter.frame_stack.pop()
            assert leavingframe is self
            if tracer:
                tracer.leave()
    def eval_block(self, block):
        """ return (nextblock, values) tuple. If nextblock
        is None, values is the concrete return value.
        """
        self.curr_block = block
        e = None
        try:
            for i, op in enumerate(block.operations):
                self.curr_operation_index = i
                self.eval_operation(op)
        except LLException, e:
            # Only the block's designated raising operation may raise.
            if op is not block.raising_op:
                raise
        except RuntimeError, e:
            rstackovf.check_stack_overflow()
            # xxx fish fish fish for proper etype and evalue to use
            rtyper = self.llinterpreter.typer
            bk = rtyper.annotator.bookkeeper
            classdef = bk.getuniqueclassdef(rstackovf._StackOverflow)
            exdata = rtyper.exceptiondata
            evalue = exdata.get_standard_ll_exc_instance(rtyper, classdef)
            etype = exdata.fn_type_of_exc_inst(evalue)
            e = LLException(etype, evalue)
            if op is not block.raising_op:
                raise e
        # determine nextblock and/or return value
        if len(block.exits) == 0:
            # return block
            tracer = self.llinterpreter.tracer
            if len(block.inputargs) == 2:
                # exception
                if tracer:
                    tracer.dump('raise')
                etypevar, evaluevar = block.getvariables()
                etype = self.getval(etypevar)
                evalue = self.getval(evaluevar)
                # watch out, these are _ptr's
                raise LLException(etype, evalue)
            resultvar, = block.getvariables()
            result = self.getval(resultvar)
            exc_data = self.llinterpreter.get_transformed_exc_data(self.graph)
            if exc_data:
                # re-raise the exception set by this graph, if any
                etype = exc_data.exc_type
                if etype:
                    evalue = exc_data.exc_value
                    if tracer:
                        tracer.dump('raise')
                    # Reset exc_data to its default (no-exception) state.
                    exc_data.exc_type = lltype.typeOf(etype)._defl()
                    exc_data.exc_value = lltype.typeOf(evalue)._defl()
                    from rpython.translator import exceptiontransform
                    T = resultvar.concretetype
                    errvalue = exceptiontransform.error_value(T)
                    # check that the exc-transformed graph returns the error
                    # value when it returns with an exception set
                    assert result == errvalue
                    raise LLException(etype, evalue)
            if tracer:
                tracer.dump('return')
            return None, result
        elif block.exitswitch is None:
            # single-exit block
            assert len(block.exits) == 1
            link = block.exits[0]
        elif block.canraise:
            # First exit is the no-exception path; the others are handlers.
            link = block.exits[0]
            if e:
                exdata = self.llinterpreter.typer.exceptiondata
                cls = e.args[0]
                inst = e.args[1]
                for link in block.exits[1:]:
                    assert issubclass(link.exitcase, py.builtin.BaseException)
                    if self.op_direct_call(exdata.fn_exception_match,
                                           cls, link.llexitcase):
                        self.setifvar(link.last_exception, cls)
                        self.setifvar(link.last_exc_value, inst)
                        break
                else:
                    # no handler found, pass on
                    raise e
        else:
            # Switch on the exit value, with an optional "default" exit.
            llexitvalue = self.getval(block.exitswitch)
            if block.exits[-1].exitcase == "default":
                defaultexit = block.exits[-1]
                nondefaultexits = block.exits[:-1]
                assert defaultexit.llexitcase is None
            else:
                defaultexit = None
                nondefaultexits = block.exits
            for link in nondefaultexits:
                if link.llexitcase == llexitvalue:
                    break   # found -- the result is in 'link'
            else:
                if defaultexit is None:
                    raise ValueError("exit case %r not found in the exit links "
                                     "of %r" % (llexitvalue, block))
                else:
                    link = defaultexit
        return link.target, [self.getval(x) for x in link.args]
    def eval_operation(self, operation):
        """Execute a single ll operation: resolve its handler, evaluate the
        arguments, run it, and bind the result variable.  LLExceptions are
        checked against the operation's declared 'canraise' set and, for
        exception-transformed graphs, stored into exc_data instead of
        propagating."""
        tracer = self.llinterpreter.tracer
        if tracer:
            tracer.dump(str(operation))
        ophandler = self.getoperationhandler(operation.opname)
        # XXX slighly unnice but an important safety check
        if operation.opname == 'direct_call':
            assert isinstance(operation.args[0], Constant)
        elif operation.opname == 'indirect_call':
            assert isinstance(operation.args[0], Variable)
        if getattr(ophandler, 'specialform', False):
            # Special-form handlers receive the raw Variables/Constants.
            retval = ophandler(*operation.args)
        else:
            vals = [self.getval(x) for x in operation.args]
            if getattr(ophandler, 'need_result_type', False):
                vals.insert(0, operation.result.concretetype)
            try:
                retval = ophandler(*vals)
            except LLException, e:
                # safety check check that the operation is allowed to raise that
                # exception
                if operation.opname in lloperation.LL_OPERATIONS:
                    canraise = lloperation.LL_OPERATIONS[operation.opname].canraise
                    if Exception not in canraise:
                        exc = self.llinterpreter.find_exception(e)
                        for canraiseexc in canraise:
                            if issubclass(exc, canraiseexc):
                                break
                        else:
                            raise TypeError("the operation %s is not expected to raise %s" % (operation, exc))
                # for exception-transformed graphs, store the LLException
                # into the exc_data used by this graph
                exc_data = self.llinterpreter.get_transformed_exc_data(
                    self.graph)
                if exc_data:
                    etype = e.args[0]
                    evalue = e.args[1]
                    exc_data.exc_type = etype
                    exc_data.exc_value = evalue
                    from rpython.translator import exceptiontransform
                    retval = exceptiontransform.error_value(
                        operation.result.concretetype)
                else:
                    raise
        self.setvar(operation.result, retval)
        if tracer:
            if retval is None:
                tracer.dump('\n')
            else:
                tracer.dump(' ---> %r\n' % (retval,))
    def make_llexception(self, exc=None):
        """Convert a Python exception (*exc*, or the current one) into an
        LLException carrying the corresponding ll type/value, and raise it."""
        if exc is None:
            original = sys.exc_info()
            exc = original[1]
            # it makes no sense to convert some exception classes that
            # just mean something buggy crashed
            if isinstance(exc, (AssertionError, AttributeError,
                                TypeError, NameError,
                                KeyboardInterrupt, SystemExit,
                                ImportError, SyntaxError)):
                raise original[0], original[1], original[2]     # re-raise it
            # for testing the JIT (see ContinueRunningNormally) we need
            # to let some exceptions introduced by the JIT go through
            # the llinterpreter uncaught
            if getattr(exc, '_go_through_llinterp_uncaught_', False):
                raise original[0], original[1], original[2]     # re-raise it
            extraargs = (original,)
        else:
            extraargs = ()
        typer = self.llinterpreter.typer
        exdata = typer.exceptiondata
        evalue = exdata.get_standard_ll_exc_instance_by_class(exc.__class__)
        etype = self.op_direct_call(exdata.fn_type_of_exc_inst, evalue)
        raise LLException(etype, evalue, *extraargs)
    def invoke_callable_with_pyexceptions(self, fptr, *args):
        """Call an external (non-graph) function pointer; Python exceptions
        it raises are converted to LLExceptions via make_llexception."""
        obj = fptr._obj
        try:
            return obj._callable(*args)
        except LLException, e:
            raise
        except Exception, e:
            if getattr(e, '_go_through_llinterp_uncaught_', False):
                raise
            # With '_debugexc' set on the callable, log details and drop
            # into a post-mortem debugger before converting.
            if getattr(obj, '_debugexc', False):
                log.ERROR('The llinterpreter got an '
                          'unexpected exception when calling')
                log.ERROR('the external function %r:' % (fptr,))
                log.ERROR('%s: %s' % (e.__class__.__name__, e))
                if self.llinterpreter.tracer:
                    self.llinterpreter.tracer.flush()
                import sys
                from rpython.translator.tool.pdbplus import PdbPlusShow
                PdbPlusShow(None).post_mortem(sys.exc_info()[2])
            self.make_llexception()
    def find_roots(self, roots):
        """Append to *roots* the addresses of this frame's live gc pointers:
        the block's input variables plus results of already-executed ops."""
        #log.findroots(self.curr_block.inputargs)
        vars = []
        for v in self.curr_block.inputargs:
            if isinstance(v, Variable):
                vars.append(v)
        for op in self.curr_block.operations[:self.curr_operation_index]:
            vars.append(op.result)
        for v in vars:
            TYPE = v.concretetype
            if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc':
                roots.append(_address_of_local_var(self, v))
# __________________________________________________________
# misc LL operation implementations
    def op_debug_view(self, *ll_objects):
        # Open the graphical ll-object tracker on the given objects.
        from rpython.translator.tool.lltracker import track
        track(*ll_objects)
    def op_debug_assert(self, x, msg):
        assert x, msg
    def op_debug_fatalerror(self, ll_msg, ll_exc=None):
        # Abort interpretation with LLFatalError, optionally wrapping an
        # ll exception instance.
        msg = ''.join(ll_msg.chars)
        if ll_exc is None:
            raise LLFatalError(msg)
        else:
            ll_exc_type = lltype.cast_pointer(rclass.OBJECTPTR, ll_exc).typeptr
            raise LLFatalError(msg, LLException(ll_exc_type, ll_exc))
    def op_debug_llinterpcall(self, pythonfunction, *args_ll):
        # Call back into plain Python; any failure becomes an LLException.
        try:
            return pythonfunction(*args_ll)
        except:
            self.make_llexception()
    def op_debug_forked(self, *args):
        raise NotImplementedError
    # The following debug hooks are intentionally no-ops in the interpreter.
    def op_debug_start_traceback(self, *args):
        pass    # xxx write debugging code here?
    def op_debug_reraise_traceback(self, *args):
        pass    # xxx write debugging code here?
    def op_debug_record_traceback(self, *args):
        pass    # xxx write debugging code here?
    def op_debug_print_traceback(self, *args):
        pass    # xxx write debugging code here?
    def op_debug_catch_exception(self, *args):
        pass    # xxx write debugging code here?
    # JIT markers are meaningful only to the translated JIT, not here.
    def op_jit_marker(self, *args):
        pass
    def op_jit_record_exact_class(self, *args):
        pass
    def op_jit_conditional_call(self, *args):
        raise NotImplementedError("should not be called while not jitted")
    def op_get_exception_addr(self, *args):
        raise NotImplementedError
    def op_get_exc_value_addr(self, *args):
        raise NotImplementedError
    def op_instrument_count(self, ll_tag, ll_label):
        pass    # xxx for now
    def op_keepalive(self, value):
        # Only relevant after translation; nothing to do here.
        pass
    def op_hint(self, x, hints):
        # Hints are advisory; pass the value through unchanged.
        return x
    # Argument-parsing helpers only exist in translated code.
    def op_decode_arg(self, fname, i, name, vargs, vkwds):
        raise NotImplementedError("decode_arg")
    def op_decode_arg_def(self, fname, i, name, vargs, vkwds, default):
        raise NotImplementedError("decode_arg_def")
    def op_check_no_more_arg(self, fname, n, vargs):
        raise NotImplementedError("check_no_more_arg")
    def op_getslice(self, vargs, start, stop_should_be_None):
        raise NotImplementedError("getslice")   # only for argument parsing
    def op_check_self_nonzero(self, fname, vself):
        raise NotImplementedError("check_self_nonzero")
    def op_setfield(self, obj, fieldname, fieldvalue):
        # obj should be pointer
        # Void fields are not materialized, so the store is skipped.
        FIELDTYPE = getattr(lltype.typeOf(obj).TO, fieldname)
        if FIELDTYPE is not lltype.Void:
            self.heap.setfield(obj, fieldname, fieldvalue)
    def op_bare_setfield(self, obj, fieldname, fieldvalue):
        # obj should be pointer
        # 'bare' variant: bypasses the heap (no GC write barrier).
        FIELDTYPE = getattr(lltype.typeOf(obj).TO, fieldname)
        if FIELDTYPE is not lltype.Void:
            setattr(obj, fieldname, fieldvalue)
    def op_getinteriorfield(self, obj, *offsets):
        """Follow a chain of field names (str) and array indexes (int)
        starting at pointer *obj* and return the innermost value."""
        checkptr(obj)
        ob = obj
        for o in offsets:
            if isinstance(o, str):
                ob = getattr(ob, o)
            else:
                ob = ob[o]
        assert not isinstance(ob, lltype._interior_ptr)
        return ob
    def getinneraddr(self, obj, *offsets):
        """Translate a chain of field names/indexes into the raw address of
        the innermost field; returns (address, FIELD_TYPE)."""
        TYPE = lltype.typeOf(obj).TO
        addr = llmemory.cast_ptr_to_adr(obj)
        for o in offsets:
            if isinstance(o, str):
                addr += llmemory.offsetof(TYPE, o)
                TYPE = getattr(TYPE, o)
            else:
                addr += llmemory.itemoffsetof(TYPE, o)
                TYPE = TYPE.OF
        return addr, TYPE
    def op_setinteriorfield(self, obj, *fieldnamesval):
        # Last element is the value; the rest is the offset chain.
        offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1]
        inneraddr, FIELD = self.getinneraddr(obj, *offsets)
        if FIELD is not lltype.Void:
            self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue, offsets)
    def op_bare_setinteriorfield(self, obj, *fieldnamesval):
        # 'bare' variant: goes straight to llheap (no write barrier).
        offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1]
        inneraddr, FIELD = self.getinneraddr(obj, *offsets)
        if FIELD is not lltype.Void:
            llheap.setinterior(obj, inneraddr, FIELD, fieldvalue)
    def op_getarrayitem(self, array, index):
        return array[index]
    def op_setarrayitem(self, array, index, item):
        # array should be a pointer
        ITEMTYPE = lltype.typeOf(array).TO.OF
        if ITEMTYPE is not lltype.Void:
            self.heap.setarrayitem(array, index, item)
    def op_bare_setarrayitem(self, array, index, item):
        # array should be a pointer
        # 'bare' variant: direct store, no write barrier.
        ITEMTYPE = lltype.typeOf(array).TO.OF
        if ITEMTYPE is not lltype.Void:
            array[index] = item
    def perform_call(self, f, ARGS, args):
        """Call function pointer *f*: interpret its graph in a new subframe
        when it has one, otherwise invoke its Python '_callable'.  The
        declared ARGS are checked against the graph's argument types."""
        fobj = f._obj
        has_callable = getattr(fobj, '_callable', None) is not None
        if hasattr(fobj, 'graph'):
            graph = fobj.graph
        else:
            assert has_callable, "don't know how to execute %r" % f
            return self.invoke_callable_with_pyexceptions(f, *args)
        args_v = graph.getargs()
        if len(ARGS) != len(args_v):
            raise TypeError("graph with %d args called with wrong func ptr type: %r" %(len(args_v), ARGS))
        for T, v in zip(ARGS, args_v):
            if not lltype.isCompatibleType(T, v.concretetype):
                raise TypeError("graph with %r args called with wrong func ptr type: %r" %
                                (tuple([v.concretetype for v in args_v]), ARGS))
        frame = self.newsubframe(graph, args)
        return frame.eval()
    def op_direct_call(self, f, *args):
        FTYPE = lltype.typeOf(f).TO
        return self.perform_call(f, FTYPE.ARGS, args)
    def op_indirect_call(self, f, *args):
        # The last argument is the list of possible target graphs (or None);
        # it is only used here as a sanity check.
        graphs = args[-1]
        args = args[:-1]
        if graphs is not None:
            obj = f._obj
            if hasattr(obj, 'graph'):
                assert obj.graph in graphs
        else:
            pass
            #log.warn("op_indirect_call with graphs=None:", f)
        return self.op_direct_call(f, *args)
    def op_malloc(self, obj, flags):
        """Allocate a fixed-size object.  'stack' flavor is emulated with a
        raw allocation that eval() frees when the frame exits."""
        flavor = flags['flavor']
        zero = flags.get('zero', False)
        track_allocation = flags.get('track_allocation', True)
        if flavor == "stack":
            result = self.heap.malloc(obj, zero=zero, flavor='raw')
            self.alloca_objects.append(result)
            return result
        ptr = self.heap.malloc(obj, zero=zero, flavor=flavor,
                               track_allocation=track_allocation)
        return ptr
    def op_malloc_varsize(self, obj, flags, size):
        """Allocate a variable-sized object; MemoryError is converted to an
        ll-level exception."""
        flavor = flags['flavor']
        zero = flags.get('zero', False)
        track_allocation = flags.get('track_allocation', True)
        assert flavor in ('gc', 'raw')
        try:
            ptr = self.heap.malloc(obj, size, zero=zero, flavor=flavor,
                                   track_allocation=track_allocation)
            return ptr
        except MemoryError:
            self.make_llexception()
    def op_free(self, obj, flags):
        # Only raw allocations can be freed explicitly.
        assert flags['flavor'] == 'raw'
        track_allocation = flags.get('track_allocation', True)
        self.heap.free(obj, flavor='raw', track_allocation=track_allocation)
    def op_gc_add_memory_pressure(self, size):
        self.heap.add_memory_pressure(size)
    def op_gc_gettypeid(self, obj):
        return lloperation.llop.combine_ushort(lltype.Signed, self.heap.gettypeid(obj), 0)
    def op_shrink_array(self, obj, smallersize):
        return self.heap.shrink_array(obj, smallersize)
    def op_zero_gc_pointers_inside(self, obj):
        raise NotImplementedError("zero_gc_pointers_inside")
    def op_gc_writebarrier_before_copy(self, source, dest,
                                       source_start, dest_start, length):
        # Delegate to the heap when it supports the barrier; otherwise the
        # copy is always allowed.
        if hasattr(self.heap, 'writebarrier_before_copy'):
            return self.heap.writebarrier_before_copy(source, dest,
                                                      source_start, dest_start,
                                                      length)
        else:
            return True
    def op_getfield(self, obj, field):
        checkptr(obj)
        # check the difference between op_getfield and op_getsubstruct:
        assert not isinstance(getattr(lltype.typeOf(obj).TO, field),
                              lltype.ContainerType)
        return getattr(obj, field)
    def op_force_cast(self, RESTYPE, obj):
        # Arbitrary bit-level cast, delegated to the ll2ctypes machinery.
        from rpython.rtyper.lltypesystem import ll2ctypes
        return ll2ctypes.force_cast(RESTYPE, obj)
    op_force_cast.need_result_type = True
    def op_cast_int_to_ptr(self, RESTYPE, int1):
        return lltype.cast_int_to_ptr(RESTYPE, int1)
    op_cast_int_to_ptr.need_result_type = True
    def op_cast_ptr_to_int(self, ptr1):
        checkptr(ptr1)
        return lltype.cast_ptr_to_int(ptr1)
    def op_cast_opaque_ptr(self, RESTYPE, obj):
        checkptr(obj)
        return lltype.cast_opaque_ptr(RESTYPE, obj)
    op_cast_opaque_ptr.need_result_type = True
    def op_length_of_simple_gcarray_from_opaque(self, obj):
        checkptr(obj)
        return lltype.length_of_simple_gcarray_from_opaque(obj)
    def op_cast_ptr_to_adr(self, ptr):
        checkptr(ptr)
        return llmemory.cast_ptr_to_adr(ptr)
    def op_cast_adr_to_int(self, adr, mode):
        checkadr(adr)
        return llmemory.cast_adr_to_int(adr, mode)
    def op_convert_float_bytes_to_longlong(self, f):
        from rpython.rlib import longlong2float
        return longlong2float.float2longlong(f)
    def op_weakref_create(self, v_obj):
        # Special form: receives the Variable itself so the weakref can
        # fetch the value lazily via the closure below.
        def objgetter():    # special support for gcwrapper.py
            return self.getval(v_obj)
        return self.heap.weakref_create_getlazy(objgetter)
    op_weakref_create.specialform = True
    def op_weakref_deref(self, PTRTYPE, obj):
        return self.heap.weakref_deref(PTRTYPE, obj)
    op_weakref_deref.need_result_type = True
    def op_cast_ptr_to_weakrefptr(self, obj):
        return llmemory.cast_ptr_to_weakrefptr(obj)
    def op_cast_weakrefptr_to_ptr(self, PTRTYPE, obj):
        return llmemory.cast_weakrefptr_to_ptr(PTRTYPE, obj)
    op_cast_weakrefptr_to_ptr.need_result_type = True
def op_gc__collect(self, *gen):
    # Optional positional argument selects the generation to collect.
    self.heap.collect(*gen)

def op_gc_heap_stats(self):
    raise NotImplementedError

def op_gc_obtain_free_space(self, size):
    raise NotImplementedError

def op_gc_can_move(self, ptr):
    addr = llmemory.cast_ptr_to_adr(ptr)
    return self.heap.can_move(addr)

def op_gc_thread_run(self):
    self.heap.thread_run()

def op_gc_thread_start(self):
    self.heap.thread_start()

def op_gc_thread_die(self):
    self.heap.thread_die()

def op_gc_thread_before_fork(self):
    raise NotImplementedError

def op_gc_thread_after_fork(self):
    raise NotImplementedError

def op_gc_free(self, addr):
    # what can you do?
    pass
    #raise NotImplementedError("gc_free")

def op_gc_fetch_exception(self):
    raise NotImplementedError("gc_fetch_exception")

def op_gc_restore_exception(self, exc):
    raise NotImplementedError("gc_restore_exception")

def op_gc_adr_of_nursery_top(self):
    raise NotImplementedError

def op_gc_adr_of_nursery_free(self):
    raise NotImplementedError

def op_gc_adr_of_root_stack_base(self):
    raise NotImplementedError

def op_gc_adr_of_root_stack_top(self):
    raise NotImplementedError

def op_gc_call_rtti_destructor(self, rtti, addr):
    # Invoke the destructor recorded in the runtime type info, if any.
    if hasattr(rtti._obj, 'destructor_funcptr'):
        d = rtti._obj.destructor_funcptr
        obptr = addr.ref()
        return self.op_direct_call(d, obptr)

def op_gc_deallocate(self, TYPE, addr):
    raise NotImplementedError("gc_deallocate")

def op_gc_reload_possibly_moved(self, v_newaddr, v_ptr):
    # 'specialform': operates on the flow-graph variables themselves so a
    # pointer possibly moved by the GC can be written back into the frame.
    assert v_newaddr.concretetype is llmemory.Address
    assert isinstance(v_ptr.concretetype, lltype.Ptr)
    assert v_ptr.concretetype.TO._gckind == 'gc'
    newaddr = self.getval(v_newaddr)
    p = llmemory.cast_adr_to_ptr(newaddr, v_ptr.concretetype)
    if isinstance(v_ptr, Constant):
        # A constant cannot be updated; it must not have moved.
        assert v_ptr.value == p
    else:
        self.setvar(v_ptr, p)
op_gc_reload_possibly_moved.specialform = True

def op_gc_identityhash(self, obj):
    return lltype.identityhash(obj)

def op_gc_id(self, ptr):
    PTR = lltype.typeOf(ptr)
    if isinstance(PTR, lltype.Ptr):
        return self.heap.gc_id(ptr)
    raise NotImplementedError("gc_id on %r" % (PTR,))

def op_gc_set_max_heap_size(self, maxsize):
    raise NotImplementedError("gc_set_max_heap_size")

def op_gc_asmgcroot_static(self, index):
    raise NotImplementedError("gc_asmgcroot_static")

def op_gc_stack_bottom(self):
    pass # marker for trackgcroot.py
def op_gc_pin(self, obj):
    # Returns whether the GC agreed to pin the object in place.
    addr = llmemory.cast_ptr_to_adr(obj)
    return self.heap.pin(addr)

def op_gc_unpin(self, obj):
    addr = llmemory.cast_ptr_to_adr(obj)
    self.heap.unpin(addr)

def op_gc__is_pinned(self, obj):
    addr = llmemory.cast_ptr_to_adr(obj)
    return self.heap._is_pinned(addr)

def op_gc_detach_callback_pieces(self):
    raise NotImplementedError("gc_detach_callback_pieces")

def op_gc_reattach_callback_pieces(self):
    raise NotImplementedError("gc_reattach_callback_pieces")

def op_gc_get_type_info_group(self):
    raise NotImplementedError("gc_get_type_info_group")

def op_gc_get_rpy_memory_usage(self):
    raise NotImplementedError("gc_get_rpy_memory_usage")

def op_gc_get_rpy_roots(self):
    raise NotImplementedError("gc_get_rpy_roots")

def op_gc_get_rpy_referents(self):
    raise NotImplementedError("gc_get_rpy_referents")

def op_gc_is_rpy_instance(self):
    raise NotImplementedError("gc_is_rpy_instance")

def op_gc_get_rpy_type_index(self):
    raise NotImplementedError("gc_get_rpy_type_index")

def op_gc_dump_rpy_heap(self):
    raise NotImplementedError("gc_dump_rpy_heap")

def op_gc_typeids_z(self):
    raise NotImplementedError("gc_typeids_z")

def op_gc_typeids_list(self):
    raise NotImplementedError("gc_typeids_list")

def op_gc_gcflag_extra(self, subopnum, *args):
    return self.heap.gcflag_extra(subopnum, *args)

def op_do_malloc_fixedsize(self):
    raise NotImplementedError("do_malloc_fixedsize")

def op_do_malloc_fixedsize_clear(self):
    raise NotImplementedError("do_malloc_fixedsize_clear")

def op_do_malloc_varsize(self):
    raise NotImplementedError("do_malloc_varsize")

def op_do_malloc_varsize_clear(self):
    raise NotImplementedError("do_malloc_varsize_clear")

def op_get_write_barrier_failing_case(self):
    raise NotImplementedError("get_write_barrier_failing_case")

def op_get_write_barrier_from_array_failing_case(self):
    raise NotImplementedError("get_write_barrier_from_array_failing_case")

def op_stack_current(self):
    return 0

def op_threadlocalref_addr(self):
    # Returns a fake address object recognized by op_raw_load/op_raw_store.
    return _address_of_thread_local()

def op_threadlocalref_get(self, RESTYPE, offset):
    return self.op_raw_load(RESTYPE, _address_of_thread_local(), offset)
op_threadlocalref_get.need_result_type = True
# __________________________________________________________
# operations on addresses

def op_raw_malloc(self, size):
    assert lltype.typeOf(size) == lltype.Signed
    return llmemory.raw_malloc(size)
# Boehm mallocs behave like raw_malloc at this level.
op_boehm_malloc = op_boehm_malloc_atomic = op_raw_malloc

def op_boehm_register_finalizer(self, p, finalizer):
    pass

def op_boehm_disappearing_link(self, link, obj):
    pass

def op_raw_malloc_usage(self, size):
    assert lltype.typeOf(size) == lltype.Signed
    return llmemory.raw_malloc_usage(size)

def op_raw_free(self, addr):
    checkadr(addr)
    llmemory.raw_free(addr)

def op_raw_memclear(self, addr, size):
    checkadr(addr)
    llmemory.raw_memclear(addr, size)

def op_raw_memcopy(self, fromaddr, toaddr, size):
    checkadr(fromaddr)
    checkadr(toaddr)
    llmemory.raw_memcopy(fromaddr, toaddr, size)

def op_raw_memset(self, addr, byte, size):
    raise NotImplementedError

op_raw_memmove = op_raw_memcopy # this is essentially the same here
def op_raw_load(self, RESTYPE, addr, offset):
    # Three addressing modes: an integer byte offset (handled through the
    # rffi/ctypes emulation), the fake thread-local address, or a symbolic
    # llmemory offset object.
    checkadr(addr)
    if isinstance(offset, int):
        from rpython.rtyper.lltypesystem import rffi
        ll_p = rffi.cast(rffi.CCHARP, addr)
        ll_p = rffi.cast(rffi.CArrayPtr(RESTYPE),
                         rffi.ptradd(ll_p, offset))
        value = ll_p[0]
    elif getattr(addr, 'is_fake_thread_local_addr', False):
        assert type(offset) is CDefinedIntSymbolic
        value = self.llinterpreter.get_tlobj()[offset.expr]
    else:
        assert offset.TYPE == RESTYPE
        value = getattr(addr, str(RESTYPE).lower())[offset.repeat]
    assert lltype.typeOf(value) == RESTYPE
    return value
op_raw_load.need_result_type = True
def op_raw_store(self, addr, offset, value):
    # XXX handle the write barrier by delegating to self.heap instead
    self.op_bare_raw_store(addr, offset, value)

def op_bare_raw_store(self, addr, offset, value):
    # Mirror of op_raw_load: integer offset, fake thread-local, or symbolic.
    checkadr(addr)
    ARGTYPE = lltype.typeOf(value)
    if isinstance(offset, int):
        from rpython.rtyper.lltypesystem import rffi
        ll_p = rffi.cast(rffi.CCHARP, addr)
        ll_p = rffi.cast(rffi.CArrayPtr(ARGTYPE),
                         rffi.ptradd(ll_p, offset))
        ll_p[0] = value
    elif getattr(addr, 'is_fake_thread_local_addr', False):
        assert type(offset) is CDefinedIntSymbolic
        self.llinterpreter.get_tlobj()[offset.expr] = value
    else:
        assert offset.TYPE == ARGTYPE
        getattr(addr, str(ARGTYPE).lower())[offset.repeat] = value

def op_stack_malloc(self, size): # mmh
    raise NotImplementedError("backend only")

def op_track_alloc_start(self, addr):
    # we don't do tracking at this level
    checkadr(addr)

def op_track_alloc_stop(self, addr):
    checkadr(addr)
# ____________________________________________________________
# Overflow-detecting variants

def op_int_neg_ovf(self, x):
    assert is_valid_int(x)
    try:
        return ovfcheck(-x)
    except OverflowError:
        # Translate the overflow into an ll-level exception in the frame.
        self.make_llexception()

def op_int_abs_ovf(self, x):
    assert is_valid_int(x)
    try:
        return ovfcheck(abs(x))
    except OverflowError:
        self.make_llexception()

def op_int_lshift_ovf(self, x, y):
    assert is_valid_int(x)
    assert is_valid_int(y)
    try:
        return ovfcheck(x << y)
    except OverflowError:
        self.make_llexception()
def _makefunc2(fn, operator, xtype, ytype=None):
    """Generate a checked binary operation 'fn' for 'operator' and install
    it into the class body of the caller (via sys._getframe).

    '//' and '%' get floor-division semantics for mixed-sign operands;
    names containing '_ovf' wrap the result in ovfcheck(); other op_int_*
    names wrap it in intmask().  Raised OverflowError / ValueError /
    ZeroDivisionError are converted to ll-level exceptions.
    (Python 2 'exec ... in' syntax.)
    """
    import sys
    d = sys._getframe(1).f_locals
    if ytype is None:
        ytype = xtype
    if '_ovf' in fn:
        checkfn = 'ovfcheck'
    elif fn.startswith('op_int_'):
        checkfn = 'intmask'
    else:
        checkfn = ''
    if operator == '//':
        code = '''r = %(checkfn)s(x // y)
            if x^y < 0 and x%%y != 0:
                r += 1
            return r
            ''' % locals()
    elif operator == '%':
        ## overflow check on % does not work with emulated int
        code = '''%(checkfn)s(x // y)
            r = x %% y
            if x^y < 0 and x%%y != 0:
                r -= y
            return r
            ''' % locals()
    else:
        code = 'return %(checkfn)s(x %(operator)s y)' % locals()
    exec py.code.Source("""
    def %(fn)s(self, x, y):
        assert isinstance(x, %(xtype)s)
        assert isinstance(y, %(ytype)s)
        try:
            %(code)s
        except (OverflowError, ValueError, ZeroDivisionError):
            self.make_llexception()
    """ % locals()).compile() in globals(), d

_makefunc2('op_int_add_ovf', '+', '(int, long, llmemory.AddressOffset)')
_makefunc2('op_int_mul_ovf', '*', '(int, long, llmemory.AddressOffset)', '(int, long)')
_makefunc2('op_int_sub_ovf', '-', '(int, long)')
_makefunc2('op_int_floordiv_ovf', '//', '(int, long)')  # XXX negative args
_makefunc2('op_int_floordiv_zer', '//', '(int, long)')  # can get off-by-one
_makefunc2('op_int_floordiv_ovf_zer', '//', '(int, long)')  # (see op_int_floordiv)
_makefunc2('op_int_mod_ovf', '%', '(int, long)')
_makefunc2('op_int_mod_zer', '%', '(int, long)')
_makefunc2('op_int_mod_ovf_zer', '%', '(int, long)')
_makefunc2('op_uint_floordiv_zer', '//', 'r_uint')
_makefunc2('op_uint_mod_zer', '%', 'r_uint')
_makefunc2('op_llong_floordiv_zer', '//', 'r_longlong')
_makefunc2('op_llong_mod_zer', '%', 'r_longlong')
_makefunc2('op_ullong_floordiv_zer', '//', 'r_ulonglong')
_makefunc2('op_ullong_mod_zer', '%', 'r_ulonglong')
_makefunc2('op_lllong_floordiv_zer', '//', 'r_longlonglong')
_makefunc2('op_lllong_mod_zer', '%', 'r_longlonglong')
def op_int_add_nonneg_ovf(self, x, y):
    # Same as int_add_ovf, but the second operand is known non-negative.
    if isinstance(y, int):
        assert y >= 0
    return self.op_int_add_ovf(x, y)

def op_int_is_true(self, x):
    # special case
    if type(x) is CDefinedIntSymbolic:
        x = x.default
    # if type(x) is a subclass of Symbolic, bool(x) will usually raise
    # a TypeError -- unless __nonzero__ has been explicitly overridden.
    assert is_valid_int(x) or isinstance(x, Symbolic)
    return bool(x)

# hack for jit.codegen.llgraph
def op_check_and_clear_exc(self):
    # Fetch the transformed exception state, reset it to its default values,
    # and report whether an exception was set.
    exc_data = self.llinterpreter.get_transformed_exc_data(self.graph)
    assert exc_data
    etype = exc_data.exc_type
    evalue = exc_data.exc_value
    exc_data.exc_type = lltype.typeOf(etype)._defl()
    exc_data.exc_value = lltype.typeOf(evalue)._defl()
    return bool(etype)
class Tracer(object):
    """Write an expandable HTML trace of interpreted graphs (enabled via the
    PYPY_TRACE environment variable)."""
    Counter = 0        # sequence number used to name successive trace files
    file = None        # open trace file, or None when tracing is off
    TRACE = int(os.getenv('PYPY_TRACE') or '0')

    HEADER = """<html><head>
<script language=javascript type='text/javascript'>
function togglestate(n) {
  var item = document.getElementById('div'+n)
  if (item.style.display == 'none')
    item.style.display = 'block';
  else
    item.style.display = 'none';
}

function toggleall(lst) {
  for (var i = 0; i<lst.length; i++) {
    togglestate(lst[i]);
  }
}
</script>
</head>

<body><pre>
"""

    FOOTER = """</pre>
<script language=javascript type='text/javascript'>
toggleall(%r);
</script>

</body></html>"""

    # '\t' placeholders are replaced by the current indentation on output.
    ENTER = ('''\n\t<a href="javascript:togglestate(%d)">%s</a>'''
             '''\n<div id="div%d" style="display: %s">\t''')
    LEAVE = '''\n</div>\t'''

    def htmlquote(self, s, text_to_html={}):
        # HTML quoting, lazily initialized
        # (the mutable default dict deliberately acts as a per-process cache)
        if not text_to_html:
            import htmlentitydefs
            for key, value in htmlentitydefs.entitydefs.items():
                text_to_html[value] = '&' + key + ';'
        return ''.join([text_to_html.get(c, c) for c in s])

    def start(self):
        # start of a dump file
        if not self.TRACE:
            return
        from rpython.tool.udir import udir
        n = Tracer.Counter
        Tracer.Counter += 1
        filename = 'llinterp_trace_%d.html' % n
        self.file = udir.join(filename).open('w')
        print >> self.file, self.HEADER
        # Keep a stable symlink pointing at the latest trace file.
        linkname = str(udir.join('llinterp_trace.html'))
        try:
            os.unlink(linkname)
        except OSError:
            pass
        try:
            os.symlink(filename, linkname)
        except (AttributeError, OSError):
            pass
        self.count = 0
        self.indentation = ''
        self.depth = 0
        self.latest_call_chain = []

    def stop(self):
        # end of a dump file
        if self.file:
            print >> self.file, self.FOOTER % (self.latest_call_chain[1:])
            self.file.close()
            self.file = None

    def enter(self, graph):
        # enter evaluation of a graph
        if self.file:
            del self.latest_call_chain[self.depth:]
            self.depth += 1
            self.latest_call_chain.append(self.count)
            s = self.htmlquote(str(graph))
            i = s.rfind(')')
            s = s[:i+1] + '<b>' + s[i+1:] + '</b>'
            # Only the outermost graph starts expanded.
            if self.count == 0:
                display = 'block'
            else:
                display = 'none'
            text = self.ENTER % (self.count, s, self.count, display)
            self.indentation += '    '
            self.file.write(text.replace('\t', self.indentation))
            self.count += 1

    def leave(self):
        # leave evaluation of a graph
        if self.file:
            self.indentation = self.indentation[:-4]
            self.file.write(self.LEAVE.replace('\t', self.indentation))
            self.depth -= 1

    def dump(self, text, bold=False):
        if self.file:
            text = self.htmlquote(text)
            if bold:
                text = '<b>%s</b>' % (text,)
            self.file.write(text.replace('\n', '\n'+self.indentation))

    def flush(self):
        if self.file:
            self.file.flush()
def wrap_callable(llinterpreter, fn, obj, method_name):
    """Return (name, callable) interpreting either the static function 'fn'
    (optionally bound to 'obj') or the method 'method_name' of 'obj'."""
    if method_name is None:
        # fn is a StaticMethod
        if obj is not None:
            self_arg = [obj]
        else:
            self_arg = []
        func_graph = fn.graph
    else:
        # obj is an instance, we want to call 'method_name' on it
        assert fn is None
        self_arg = [obj]
        func_graph = obj._TYPE._methods[method_name._str].graph
    return wrap_graph(llinterpreter, func_graph, self_arg)
def wrap_graph(llinterpreter, graph, self_arg):
    """Build a plain Python callable that evaluates *graph* on *llinterpreter*.

    ``self_arg`` (an empty or one-element list) is prepended to the call
    arguments.  Returns ``(graph.name, callable)``; the callable also carries
    ``graph`` and ``self_arg`` attributes for introspection.
    """
    def run(*call_args):
        full_args = self_arg + list(call_args)
        return llinterpreter.eval_graph(graph, args=full_args)

    run.graph = graph
    run.self_arg = self_arg
    return graph.name, run
def enumerate_exceptions_top_down():
    """List all builtin exception classes, base classes before subclasses
    (uses the Python 2 'exceptions' module)."""
    import exceptions
    result = []
    seen = {}
    def addcls(cls):
        # Only real exception classes (old-style junk is skipped).
        if (type(cls) is type(Exception) and
            issubclass(cls, py.builtin.BaseException)):
            if cls in seen:
                return
            for base in cls.__bases__:   # bases first
                addcls(base)
            result.append(cls)
            seen[cls] = True
    for cls in exceptions.__dict__.values():
        addcls(cls)
    return result
class _address_of_local_var(object):
    """Fake llmemory.Address standing for 'address of frame-local variable v'."""
    _TYPE = llmemory.Address

    def __init__(self, frame, v):
        self._frame = frame
        self._v = v

    def _getaddress(self):
        # Reading/writing goes through the accessor object below.
        return _address_of_local_var_accessor(self._frame, self._v)
    address = property(_getaddress)
class _address_of_local_var_accessor(object):
    """Reads/writes a frame-local variable through its fake address;
    only index [0] is meaningful."""

    def __init__(self, frame, v):
        self.frame = frame
        self.v = v

    def __getitem__(self, index):
        if index != 0:
            raise IndexError("address of local vars only support [0] indexing")
        p = self.frame.getval(self.v)
        result = llmemory.cast_ptr_to_adr(p)
        # the GC should never see instances of _gctransformed_wref
        result = self.unwrap_possible_weakref(result)
        return result

    def __setitem__(self, index, newvalue):
        if index != 0:
            raise IndexError("address of local vars only support [0] indexing")
        if self.v.concretetype == llmemory.WeakRefPtr:
            # fish some more
            assert isinstance(newvalue, llmemory.fakeaddress)
            p = llmemory.cast_ptr_to_weakrefptr(newvalue.ptr)
        else:
            p = llmemory.cast_adr_to_ptr(newvalue, self.v.concretetype)
        self.frame.setvar(self.v, p)

    def unwrap_possible_weakref(self, addr):
        # fish fish fish
        if addr and isinstance(addr.ptr._obj, llmemory._gctransformed_wref):
            return llmemory.fakeaddress(addr.ptr._obj._ptr)
        return addr
class _address_of_thread_local(object):
    # Fake address of the thread-local storage area; recognized by
    # op_raw_load and op_bare_raw_store via the flag below.
    _TYPE = llmemory.Address
    is_fake_thread_local_addr = True


# by default we route all logging messages to nothingness
# e.g. tests can then switch on logging to get more help
# for failing tests
from rpython.tool.ansi_print import ansi_log
py.log.setconsumer('llinterp', ansi_log)
| {
"content_hash": "402abedb6e1545ec11da2a120c9a7e45",
"timestamp": "",
"source": "github",
"line_count": 1349,
"max_line_length": 149,
"avg_line_length": 36.47442550037064,
"alnum_prop": 0.5637956263718397,
"repo_name": "jptomo/rpython-lang-scheme",
"id": "5f4c12152ed318d8f524146b4f0ce35c6c9a8271",
"size": "49204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpython/rtyper/llinterp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "335765"
},
{
"name": "C++",
"bytes": "12638"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "6988"
},
{
"name": "Objective-C",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "16129160"
},
{
"name": "Scheme",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "721"
},
{
"name": "VimL",
"bytes": "1107"
}
],
"symlink_target": ""
} |
from django.template import Template, Context
from unittest import TestCase
class TemplateTestCase(TestCase):
    """Base TestCase that renders Django template strings, optionally
    prefixing a ``{% load %}`` for the tag library named in ``library``."""

    # Name of the template-tag library to load before rendering, or None.
    library = None

    def render(self, content, **context_data):
        """Render *content* with *context_data*; returns the rendered string."""
        load_tpl = ''
        if self.library:
            load_tpl = '{% load ' + self.library + ' %}'
        tpl = Template(load_tpl + content)
        context = Context(context_data)
        return tpl.render(context)
def render_template(content='', context_data=None):
    """Render *content* as a Django template string with *context_data*.

    ``context_data`` defaults to ``None`` instead of the original mutable
    ``{}`` default, which would be a single dict shared across all calls.
    """
    context = Context(context_data or {})
    tpl = Template(content)
    return tpl.render(context)
| {
"content_hash": "4099e668a12104d150199a39b2e7b3ff",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 56,
"avg_line_length": 25.863636363636363,
"alnum_prop": 0.6362038664323374,
"repo_name": "redisca/django-redisca",
"id": "1ee019254f31748a03d6a30c3cf8d9dc51e76de0",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redisca/testing/template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "742"
},
{
"name": "HTML",
"bytes": "6744"
},
{
"name": "Python",
"bytes": "22737"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
"""A PyClassNode subclass that also exposes the instance as a Courier server."""
import datetime
from typing import Any, Callable, Generic, Optional, Mapping, TypeVar
from absl import logging
import courier
from launchpad import address as lp_address
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
from launchpad.nodes import base
from launchpad.nodes.courier import courier_utils
from launchpad.nodes.python import node as python
# Type variable for the wrapped worker object.
WorkerType = TypeVar('WorkerType')
CourierClient = courier.Client

# Name under which the courier port is allocated on the node's address.
COURIER_PORT_NAME = 'courier'
class CourierHandle(base.Handle[CourierClient]):
  """Handle of a CourierNode."""

  def __init__(self, address: lp_address.Address, **kwargs):
    self._address = address
    self._kwargs = kwargs

  def __getattr__(self, method):
    # Only invoked for attributes not found through normal lookup; turns a
    # forgotten dereference into a helpful error instead of a silent miss.
    raise AttributeError(
        f'\'CourierHandle\' object has no attribute \'{method}\'. '
        'Most likely you need to dereference handle before use '
        '(see launchpad.maybe_dereference).')

  def set_client_kwargs(self, **kwargs):
    # Replace the kwargs forwarded to courier.Client on dereference.
    self._kwargs = kwargs

  def dereference(self) -> CourierClient:
    # Resolve the address lazily so dereferencing works after launch.
    return CourierClient(self._address.resolve(), **self._kwargs)
class CourierNode(Generic[WorkerType], python.PyClassNode[CourierHandle,
                                                          WorkerType]):
  """Exposes a Python instance as a Courier server.

  This will initialize the object and expose all its public methods as Courier
  RPC methods. Attributes and method names starting with underscore will not be
  exposed. After that, run() will be called if it's provided.
  When run() is provided, the server will terminate at the end of run().
  Otherwise, it will serve indefinitely (until the job/experiment terminates).

  Advanced usage: if the object has a set_courier_server() method, it will be
  called with the courier server object passed in as the only argument. The
  courier server will then be managed by the user (e.g., need to manually call
  Start() of the courier server).
  """

  def __init__(self,
               constructor: Callable[..., WorkerType],
               *args,
               courier_kwargs: Optional[Mapping[str, Any]] = None,
               courier_client_kwargs: Optional[Mapping[str, Any]] = None,
               **kwargs):
    """Initializes a new instance of the `CourierNode` class.

    Args:
      constructor: Function that creates a new instance of the actual worker.
      *args: Positional arguments passed to `constructor`.
      courier_kwargs: Keyword arguments passed to the courier server.
      courier_client_kwargs: Keyword arguments passed to the courier clients.
      **kwargs: Keyword arguments passed to `constructor`.
    """
    super().__init__(constructor, *args, **kwargs)
    self._address = lp_address.Address(COURIER_PORT_NAME)
    self.allocate_address(self._address)
    self._courier_kwargs = courier_kwargs or {}
    self._courier_client_kwargs = courier_client_kwargs or {}
    # Set in `run()` method.
    self._server: Optional[courier.Server] = None

  def configure(self, *args, **kwargs):
    """Sets the args and kwargs being passed to the constructor.

    This is useful for achieving cyclic referencing. E.g.:
      foo_node = CourierNode(_foo)
      foo_handle = foo_node.create_handle()
      bar_node = CourierNode(_bar)
      bar_handle = bar_node.create_handle()
      foo_node.configure(bar=bar_handle)
      bar_node.configure(foo=foo_handle)
      p.add_node(foo_node)
      p.add_node(bar_node)

    Args:
      *args: non-keyword arguments to pass to the constructor.
      **kwargs: keyword arguments to pass to the constructor.
    """
    self._args = args
    self._kwargs = kwargs
    # Re-scan the (possibly new) arguments for handles to other nodes.
    self._collect_input_handles()

  def create_handle(self) -> CourierHandle:
    """See `Node.create_handle`."""
    return self._track_handle(
        CourierHandle(self._address, **self._courier_client_kwargs))

  def run(self) -> None:
    """Creates the worker instance and executes the user-provided function."""
    instance = self._construct_instance()
    self._server = courier_utils.make_courier_server(
        instance,
        port=lp_address.get_port_from_address(self._address.resolve()),
        **self._courier_kwargs)
    if hasattr(instance, 'set_courier_server'):
      # Transfer the ownership of the server to the instance, so that the user
      # can decide when to start and stop the courier server.
      instance.set_courier_server(self._server)
      if hasattr(instance, 'run') and self._should_run:
        instance.run()
    else:
      # Start the server after instantiation and serve forever
      self._server.Start()
      try:
        if hasattr(instance, 'run') and self._should_run:
          # If a run() method is provided, stop the server at the end of run().
          instance.run()
        else:
          # No run(): serve until the worker manager signals shutdown.
          if lp_flags.LP_WORKER_MANAGER_V2.value:
            worker_manager_v2.wait_for_stop()
          else:
            worker_manager.wait_for_stop()
      finally:
        self._server.Stop()
        self._server.Join()

  @property
  def courier_address(self) -> lp_address.Address:
    """Returns the physical address of the courier server."""
    return self._address
| {
"content_hash": "d40f292b183624adfa8a4acb4e4cc667",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 80,
"avg_line_length": 36.57241379310345,
"alnum_prop": 0.6705638317933246,
"repo_name": "deepmind/launchpad",
"id": "99652c9bf5e5f44b9bb637d5bcf3b47863de6ddd",
"size": "5919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "launchpad/nodes/courier/node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2395"
},
{
"name": "C++",
"bytes": "217863"
},
{
"name": "Dockerfile",
"bytes": "4420"
},
{
"name": "Python",
"bytes": "285547"
},
{
"name": "Shell",
"bytes": "12821"
},
{
"name": "Starlark",
"bytes": "51487"
}
],
"symlink_target": ""
} |
"""Package contenant les contexes de PNJ."""
from . import controler
| {
"content_hash": "ec9737d58cf941555a0e5b565c609088",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 44,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.7285714285714285,
"repo_name": "stormi/tsunami",
"id": "665c32b2a244d1a0a86675bc91fe8289ac8e1509",
"size": "1635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/pnj/contextes/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
class AuthNeeded(Exception):
    """Raised when an operation requires authentication."""
    pass
# NOTE(review): name looks like a typo for 'InconsistentState'; renaming
# would break existing callers, so it is kept as-is.
class InconsistenState(Exception):
    """Raised when internal state is found to be inconsistent."""
    pass
| {
"content_hash": "7b0b9f7e90087ed8380f4124787db281",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 34,
"avg_line_length": 14,
"alnum_prop": 0.7380952380952381,
"repo_name": "peralmq/olloapi",
"id": "7fa5f84d8237a4665954096f9b5b19448b839c92",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1583"
}
],
"symlink_target": ""
} |
'''
Gets the given sys.argv[1] as a city_name
Reads all the CSV file inside 'data/' folder as a DataFrame
Calculates total population of each city
Plots it to a Histogram Chart for each year under output/ folder
@author: pekzeki
'''
import commons
import pygal
import glob
import os
import sys
if len(sys.argv) > 1:
    # Decoded city name is used for chart labels (Python 2: argv is bytes).
    city_name = sys.argv[1].decode('utf-8')

    population = []
    years = []

    files = glob.glob(os.path.join('data','*.csv'))
    for file in files:
        year = int(commons.get_year(file))
        tmp = commons.get_population(file)
        # NOTE(review): the lookup uses the raw sys.argv[1] while the label
        # uses the decoded city_name -- confirm both match the CSV encoding.
        row = tmp.loc[tmp['Sehir'] == sys.argv[1]]
        population.append(commons.convert_integer(row['Toplam'].irow(0)))
        years.append(year)

    #sort list
    # NOTE(review): population and years are sorted independently, which
    # breaks the year<->population pairing unless population grows
    # monotonically over time -- verify this is intended.
    sorted_population = sorted(population)
    sorted_years = sorted(years)

    #Plotting the data
    bar_chart = pygal.Bar(y_title='Population', x_title='Years')
    bar_chart.title = 'Population/Years for ' + city_name
    bar_chart.add(city_name, sorted_population)
    bar_chart.x_labels = map(str, sorted_years)
    bar_chart.render_to_file('output/evolution_'+city_name+'.svg')
else:
    print "Missing Argument: City_Name"
"content_hash": "3e1e155a7710eb232114e5fbe96aa5cf",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 93,
"avg_line_length": 28.844444444444445,
"alnum_prop": 0.5955315870570108,
"repo_name": "pekzeki/PopulationAnalysis",
"id": "ddf7321b9701c6a05692e8062848bacb542b90e1",
"size": "1322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evolution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19176"
}
],
"symlink_target": ""
} |
""" Tests barbante.api.consolidate_user_templates.
"""
import json
import nose.tools
import barbante.api.consolidate_user_templates as script
import barbante.utils.logging as barbante_logging
import barbante.tests as tests
log = barbante_logging.get_logger(__name__)
def test_script():
    """ Tests a call to script barbante.api.consolidate_user_templates.
    """
    result = script.main([tests.TEST_ENV])
    log.debug(result)
    # json.dumps raises if the result is not serializable.
    result_json = json.dumps(result)
    nose.tools.ok_(result_json)  # a well-formed json is enough

if __name__ == '__main__':
    test_script()
"content_hash": "1f2a01b1092c345c7187fd340f2ae030",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 22.5,
"alnum_prop": 0.7025641025641025,
"repo_name": "hypermindr/barbante",
"id": "056b0d9377f4ecc78633e89eb3421deb5ef20ae9",
"size": "585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barbante/api/tests/test_consolidate_user_templates_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "728872"
}
],
"symlink_target": ""
} |
# Public API of this module.  The original spelled the name '__all___'
# (three trailing underscores), which Python treats as an ordinary variable,
# silently disabling the star-import export list.
__all__ = [
    'YubicoError',
    'StatusCodeError',
    'InvalidClientIdError',
    'InvalidValidationResponse',
    'SignatureVerificationError'
]
class YubicoError(Exception):
    """ Base class for Yubico related exceptions. """
    pass
class StatusCodeError(YubicoError):
    """Raised when the validation server returns a non-OK status code."""

    def __init__(self, status_code):
        self.status_code = status_code

    def __str__(self):
        return ('Yubico server returned the following status code: %s' %
                (self.status_code))
class InvalidClientIdError(YubicoError):
    """Raised when the provided API client ID is unknown."""

    def __init__(self, client_id):
        self.client_id = client_id

    def __str__(self):
        return 'The client with ID %s does not exist' % (self.client_id)
class InvalidValidationResponse(YubicoError):
    """Raised when the validation server's response fails a sanity check."""

    def __init__(self, reason, response, parameters=None):
        self.reason = reason
        self.response = response
        self.parameters = parameters
        # Kept for backwards compatibility with code reading .message.
        self.message = self.reason

    def __str__(self):
        return self.reason
class SignatureVerificationError(YubicoError):
    """Raised when the response signature doesn't match the one we computed."""

    def __init__(self, generated_signature, response_signature):
        self.generated_signature = generated_signature
        self.response_signature = response_signature

    def __str__(self):
        # The original wrapped the message in repr() (adding quotes) and
        # omitted the space before '(expected', producing
        # "...failed(expected ...".  Return a plain, readable message.
        return ('Server response message signature verification failed '
                '(expected %s, got %s)' % (self.generated_signature,
                                           self.response_signature))
| {
"content_hash": "f2e5db128c1dffd84192bf221907e8eb",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 28.725490196078432,
"alnum_prop": 0.6286689419795222,
"repo_name": "Yubico/python-yubico-client-dpkg",
"id": "891336c8ba0c4d4c6bda534f3bc9129b0303a224",
"size": "1465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yubico_client/yubico_exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "35064"
}
],
"symlink_target": ""
} |
from clamped import BarClamp
# Smoke test: instantiate the clamped proxy class and exercise call().
x = BarClamp()
x.call()
| {
"content_hash": "7f9540e9dc342a33c6396d848492bb13",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 28,
"avg_line_length": 13.5,
"alnum_prop": 0.7222222222222222,
"repo_name": "jimbaker/clamped",
"id": "95af799846aa64c3db69197470ace3d73ad63cd2",
"size": "54",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__run__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "343"
},
{
"name": "Python",
"bytes": "14003"
},
{
"name": "TeX",
"bytes": "12664"
}
],
"symlink_target": ""
} |
import os
import tempfile
from .vendor.six.moves import range
def printable_decimal_and_hex(num):
    """Render *num* as its decimal form followed by the hex form, e.g. '255 (0xff)'."""
    template = "{0:d} (0x{0:x})"
    return template.format(num)
def assert_index_sane(index, upper_bound_exclusive):
    """Assert that *index* is an int within [0, upper_bound_exclusive)."""
    is_integer = type(index) == int
    assert is_integer, "Indices should be integers; '%s' is not" % (
        index)
    in_range = 0 <= index < upper_bound_exclusive
    assert in_range, (
        "Index %d out of range [%d, %d)" % (index, 0, upper_bound_exclusive))
class ObjectLookupDict(object):
    """Indexable view mapping positions to objects through an ID list.

    ``id_list[i]`` holds the index into ``object_list`` for position ``i``;
    assigning stores ``value.index`` back into ``id_list``.
    """

    def __init__(self, id_list, object_list):
        self.id_list = id_list
        self.object_list = object_list

    def __getitem__(self, index):
        assert_index_sane(index, len(self.id_list))
        object_id = self.id_list[index]
        return self.object_list[object_id]

    def __setitem__(self, index, value):
        assert_index_sane(index, len(self.id_list))
        self.id_list[index] = value.index
def name_without_zeroes(name):
    """
    Return a human-readable name without LSDJ's trailing zeroes.

    :param name: the (bytes) name from which to strip zeroes
    :rtype: the name, without trailing zeroes, same type as the input
    """
    first_zero = name.find(b'\0')
    if first_zero == -1:
        return name
    # Return the truncated name directly.  The original wrapped this in
    # str(), which on Python 3 turned bytes into the literal "b'...'" text
    # and made the return type inconsistent with the no-zero branch.
    return name[:first_zero]
class temporary_file:
    """Context manager yielding the path of a fresh temp file, unlinked on exit."""

    def __enter__(self):
        handle, path = tempfile.mkstemp()
        # Close the OS-level handle immediately; callers reopen by path.
        os.close(handle)
        self.abspath = path
        return self.abspath

    def __exit__(self, exc_type, exc_value, exc_traceback):
        path = getattr(self, 'abspath', None)
        if path is not None:
            os.unlink(path)
def fixed_width_string(string, width, fill=' '):
    """Truncate or pad *string* to exactly *width* characters using *fill*.

    The original called ``ljust(fill)``, passing the fill character where
    ljust expects the width, which raised TypeError for the default ' '
    and never padded anything.
    """
    return string[:width].ljust(width, fill)
| {
"content_hash": "3d9be25c23b97393d097776573e9b15e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 27.844827586206897,
"alnum_prop": 0.6247678018575852,
"repo_name": "iLambda/lsdj-wave-cruncher",
"id": "f8e74494dc3509e7b22e02f7cc572dd8c28a432c",
"size": "1615",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/pylsdj/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "9253"
},
{
"name": "Python",
"bytes": "488195"
}
],
"symlink_target": ""
} |
from flask import abort, redirect, render_template, render_template_string, Blueprint, g
from flask import request, url_for
from flask_user import current_user, login_required, roles_accepted
from flask.ext.babel import Babel

from app import app, db
from app.core.models import UserProfileForm
# Blueprint for the core pages, mounted at the site root.
core_blueprint = Blueprint('core', __name__, url_prefix='/')
babel = Babel(app)
# Enabling localization
@app.before_request
def before():
    # Pull the language code out of the routed URL (when present) and stash
    # it on flask.g for the locale selector; reject unsupported codes.
    if request.view_args and 'lang_code' in request.view_args:
        if request.view_args['lang_code'] not in ('es', 'en'):
            return abort(404)
        else:
            g.current_lang = request.view_args['lang_code']
            # Remove it so view functions don't receive lang_code as an arg.
            request.view_args.pop('lang_code')
@babel.localeselector
def get_locale():
    # Use the language stored by before(); fall back to 'de'.
    return g.get('current_lang', 'de')
# The Home page is accessible to anyone
@core_blueprint.route('')
def home_page():
    return render_template('core/home_page.html', lang_code='de')
# The FAQ page is accessible to anyone
@core_blueprint.route('faq')
def faq_page():
    return render_template('core/faq.html')
# The User page is accessible to authenticated users (users that have logged in)
@core_blueprint.route('user')
@login_required  # Limits access to authenticated users
def user_page():
    return render_template('core/user_page.html')
# The Admin page is accessible to users with the 'admin' role
@core_blueprint.route('admin')
@roles_accepted('admin')  # Limits access to users with the 'admin' role
def admin_page():
    """Render the admin-only page."""
    template = 'core/admin_page.html'
    return render_template(template)
@core_blueprint.route('user/profile', methods=['GET', 'POST'])
@login_required
def user_profile_page():
    """Show and process the current user's profile edit form.

    GET (or an invalid POST) renders the form; a valid POST copies the
    submitted fields onto the current user, commits, and redirects home.
    """
    form = UserProfileForm(request.form, current_user)

    is_valid_submission = request.method == 'POST' and form.validate()
    if not is_valid_submission:
        # GET request or failed validation: (re)display the form.
        return render_template('core/user_profile_page.html', form=form)

    # Copy the validated form fields onto the user record and persist.
    form.populate_obj(current_user)
    db.session.commit()
    return redirect(url_for('core.home_page'))
# Register blueprint so all core routes above become active on the app.
app.register_blueprint(core_blueprint)
| {
"content_hash": "bfa33d40cfcf63db6fb893cfe058447f",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 81,
"avg_line_length": 30.767123287671232,
"alnum_prop": 0.6829919857524488,
"repo_name": "dleicht/planx",
"id": "09b37913f94319c80208582271bb53653c854482",
"size": "2347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6027"
},
{
"name": "HTML",
"bytes": "45996"
},
{
"name": "JavaScript",
"bytes": "760"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "29236"
},
{
"name": "Shell",
"bytes": "4012"
}
],
"symlink_target": ""
} |
print("Fuel calculator - Simple cost per litre to cost per gallon\n")

# Litres per imperial gallon; hoisted so the constant is named once.
LITRES_PER_GALLON = 4.54609

# Bug fix: the original used two independent try blocks, so an invalid
# price in the first block left costperlitre/costpergallon/
# formattedcostpergallon unbound and the second block then crashed with
# NameError instead of reporting the error.  A single try keeps the flow
# coherent: any bad numeric input lands in the one handler below.
try:
    # --- Cost conversion: price per litre -> price per gallon ---
    costperlitre = input("Please input cost per litre (eg 0.99 is £0.99): £")
    costpergallon = float(costperlitre) * LITRES_PER_GALLON
    formattedcostpergallon = format(costpergallon, '.2f')
    print("Cost per gallon: £%s\n" % formattedcostpergallon)

    # --- Volume conversion plus a cost summary for both units ---
    litres = input("Please input number of litres: ")
    gallons = float(litres) / LITRES_PER_GALLON
    formattedgallons = format(gallons, '.2f')
    litrecost = float(costperlitre) * float(litres)
    formattedlitrecost = format(litrecost, '.2f')
    gallonscost = costpergallon * gallons
    formattedgallonscost = format(gallonscost, '.2f')
    print("%s litres = %s gallons \n" % (litres, formattedgallons))
    print("Summary:")
    print("%s litres @ £%s/l gives total £%s" % (litres, costperlitre, formattedlitrecost))
    print("%s gallons @ £%s/gal gives total £%s\n" % (formattedgallons, formattedcostpergallon, formattedgallonscost))
    input("Press enter to exit")
except ValueError:
    print("Error: That's an invalid value, you should enter a price in decimal form")
    input("Press enter to exit")
| {
"content_hash": "a4483226facca4941be50f0ae750bf76",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 118,
"avg_line_length": 34.475,
"alnum_prop": 0.6961566352429297,
"repo_name": "Cryptostrike/Fuelcalc",
"id": "e10d2d7198c1a5a4b057edde824a01a87a932689",
"size": "1386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1386"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.