from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organizers', '0001_initial'),
('applications', '0020_new_applications_20200402_1036'),
]
operations = [
migrations.AlterField(
model_name='applicationcomment',
name='application',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='applications.HackerApplication'),
),
migrations.AlterField(
model_name='vote',
name='application',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='applications.HackerApplication'),
),
]
| {
"content_hash": "09f9406c5cf863e18ecd35c436c843f4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 118,
"avg_line_length": 31.08,
"alnum_prop": 0.6435006435006435,
"repo_name": "hackupc/backend",
"id": "be08bd3ba11e94dded5f5fdd662948966cdeede0",
"size": "851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "organizers/migrations/0002_to_new_applications_20200402_1036.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3625"
},
{
"name": "HTML",
"bytes": "81735"
},
{
"name": "JavaScript",
"bytes": "4029"
},
{
"name": "Python",
"bytes": "118984"
},
{
"name": "Shell",
"bytes": "1659"
}
],
"symlink_target": ""
} |
from __future__ import division
import glob
import json
import os
from collections import Counter
from copy import deepcopy
from fractions import Fraction
from send_email import send_email
try:
import abcparse
except ImportError:
import gamc.abcparse as abcparse
if not os.path.exists("ABCs"):
os.makedirs("ABCs")
with open("sessions_data_clean.txt") as f:
abcs = f.read().split("\n\n")
n_subdirs = len(abcs) // 5000
for i in range(n_subdirs + 1):
if not os.path.exists(os.path.join("ABCs", str(i))):
os.makedirs(os.path.join("ABCs", str(i)))
for i, abc in enumerate(abcs):
subdir = i // 5000
with open(os.path.join("ABCs", str(subdir), str(i) + ".abc"), "w") as f:
f.write(abc + "\n\n")
def get_N_grams(_input, n):
output = dict() # maps each n-gram string to its occurrence count
# str or float
# if isinstance(_input[0], float):
# _input = [
# str(Fraction(1 / ele).limit_denominator(32)) for ele in _input
# ]
for i in range(len(_input) - n + 1):
n_gram = ' '.join(_input[i: i + n]) # adjacent tokens form one n-gram
if n_gram in output: # count the frequency of each n-gram
output[n_gram] += 1
else:
output[n_gram] = 1
return output
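# Example (hypothetical input): get_N_grams(["C", "D", "E", "C", "D"], 2)
# returns {"C D": 2, "D E": 1, "E C": 1} -- counts of adjacent token pairs.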
def compute_statistics(abc_file):
"""
Compute statistics: note, name (pitch), duration
:param abc_file: str - path to an abc file
:return:
"""
try:
key, meter, notes = abcparse.parse_abc(abc_file)
km = key + "_" + meter
names, durations = zip(*notes)
names = [n.rstrip("*") for n in names] # strip louder symbol `*`
# convert duration to fraction form
durations = [ # denominators are capped at 32
str(Fraction(1 / ele).limit_denominator(32)) for ele in
durations]
notes = (
name + "_" + duration for name, duration in zip(names, durations))
names_2 = get_N_grams(names, 2)
names_3 = get_N_grams(names, 3)
names_4 = get_N_grams(names, 4)
duration_2 = get_N_grams(durations, 2)
duration_3 = get_N_grams(durations, 3)
duration_4 = get_N_grams(durations, 4)
return {
km: {
"name": Counter(names),
"duration": Counter(durations),
"note": Counter(notes),
"name_2": Counter(names_2),
"name_3": Counter(names_3),
"name_4": Counter(names_4),
"duration_2": Counter(duration_2),
"duration_3": Counter(duration_3),
"duration_4": Counter(duration_4),
}
}
except Exception:
return None # parse failures yield None; the caller filters these out
def concat_statistics(a, b):
for km, nd in b.iteritems():
if km in a.keys():
for k, v in nd.iteritems():
a[km][k].update(v) # update Counter
else:
a[km] = nd # add new key and meter combination
return a
def classify(statistics):
flat = {}
on_key = {}
on_meter = {}
for km, nd in statistics.iteritems():
key, meter = km.split("_")
for k, v in nd.iteritems():
if k in flat.keys():
flat[k].update(v)
else:
flat[k] = deepcopy(v) # !!! without deepcopy, this would mutate statistics
if key in on_key.keys():
for k, v in nd.iteritems():
on_key[key][k].update(v)
else:
on_key[key] = deepcopy(nd)
if meter in on_meter.keys():
for k, v in nd.iteritems():
on_meter[meter][k].update(v)
else:
on_meter[meter] = deepcopy(nd)
return flat, on_key, on_meter
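# Illustration (hypothetical input): given statistics for the key/meter
# combinations {"C_4/4": nd1, "C_3/4": nd2}, `flat` merges every Counter,
# `on_key` groups the Counters under "C", and `on_meter` groups them under
# "4/4" and "3/4" respectively.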
# import joblib
# statistics = joblib.Parallel(n_jobs=1, verbose=0)( # n_jobs > 1, has problem
# joblib.delayed(compute_statistics)(abc_file)
# for abc_file in glob.glob(os.path.join("ABCs", "999*.abc")))
subdirs = os.listdir("ABCs")
for sd in subdirs:
statistics = (compute_statistics(abc_file)
for abc_file in
glob.iglob(os.path.join("ABCs", sd, "*.abc")))
statistics = (s for s in statistics if
s is not None) # eliminate None value
statistics = reduce(concat_statistics, statistics)
with open("abc_result.txt" + subdir, "w") as f:
f.write(json.dumps(statistics))
# statistics_flat, statistics_key, statistics_meter = classify(statistics)
#
#
# with open("abc_result_flat.txt", "w") as f:
# f.write(json.dumps(statistics_flat))
#
# with open("abc_result_key.txt", "w") as f:
# f.write(json.dumps(statistics_key))
#
# with open("abc_result_meter.txt", "w") as f:
# f.write(json.dumps(statistics_meter))
send_email()
| {
"content_hash": "d73257f479e0b4869df6db4e92ef0888",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 79,
"avg_line_length": 27.611764705882354,
"alnum_prop": 0.548146570089476,
"repo_name": "Engine-Treasure/Graduation-Project",
"id": "c882ed1385a684e485111e5f16fbbe096c8d0a62",
"size": "4780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/abc_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "125113"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "100540"
}
],
"symlink_target": ""
} |
from __future__ import division
from metrix_db.initialiser import processing_statistic_name_mapping
# Name of columns for statistics
stat_name_list = ['Overall_Stats', 'High_Res_Stats', 'Low_Res_Stats']
class XIA2Parser(object):
'''
A class to represent the xia2 parser
'''
def __init__(self, handle):
'''
Init the class with the handle
'''
import sqlite3
self.handle = handle
self.cur = self.handle.cursor()
def _select_id_from_pdb_id(self, pdb_id):
'''
Find individual id for a PDB code in table PDB_id
'''
self.cur.execute('''
SELECT id FROM PDB_id WHERE PDB_id.pdb_id="%s"
''' % (pdb_id))
return self.cur.fetchone()[0]
def _insert_or_ignore_into_sweeps(self, pdb_id):
'''
Find the SWEEPS and their IDs that belong to a particular
PDB_id
'''
self.cur.execute('''
INSERT OR IGNORE INTO SWEEPS
(pdb_id_id) SELECT id FROM PDB_id
WHERE PDB_id.pdb_id="%s"
''' % (pdb_id))
def _select_id_from_sweeps(self, pdb_pk):
'''
Finding the SWEEP_ids belonging to a particular PDB_id
and return them
'''
self.cur.execute('''
SELECT id FROM SWEEPS WHERE SWEEPS.pdb_id_id="%s"
''' % (pdb_pk))
sweep_pk = self.cur.fetchall()[-1][0]
return sweep_pk
def _insert_into_sweep_id(self, name, sweep_pk):
'''
Find the column/stat name to be entered for each selected sweep_id
'''
self.cur.execute('''
INSERT INTO %s (sweep_id) VALUES (%s)
''' % (name, sweep_pk))
def _update_high_res_stats(self, name, value, sweep_pk):
'''
Update high resolution stats for a selected sweep_id
'''
self.cur.execute('''
UPDATE High_Res_Stats SET %s = %s
WHERE sweep_id = %s
''' % (name, value, sweep_pk))
def _update_low_res_stats(self, name, value, sweep_pk):
'''
Update low resolution stats for a selected sweep_id
'''
self.cur.execute('''
UPDATE Low_Res_Stats SET %s = %s
WHERE sweep_id = %s
''' % (name, value, sweep_pk))
def _update_overall_stats(self, name, value, sweep_pk):
'''
Update overall stats for a selected sweep_id
'''
self.cur.execute('''
UPDATE Overall_Stats SET %s = %s
WHERE sweep_id = %s
''' % (name, value, sweep_pk))
def _update_stats(self, name, overall, low, high, sweep_pk):
'''
Update all stats for overall, low and high resolution at once
'''
if overall is not None:
self._update_overall_stats(name, overall, sweep_pk)
if low is not None:
self._update_low_res_stats(name, low, sweep_pk)
if high is not None:
self._update_high_res_stats(name, high, sweep_pk)
def _update_wavelength(self, sweep_pk, wavelength):
'''
Update the wavelength for a given sweep_id
'''
self.cur.execute('''
UPDATE SWEEPS SET wavelength = %s WHERE id = "%s"
''' % (wavelength, sweep_pk))
def _insert_into_dev_stats(self, sweep_pk):
'''
Enter sweep_id into Dev_stats table
'''
self.cur.execute('''
INSERT INTO Dev_Stats_json (sweep_id) VALUES (%s)
''' % (sweep_pk))
def _update_dev_stats_date_time(self, sweep_pk):
'''
Update the timestamp for each sweep_id in
Dev_stat Table
'''
import datetime
self.cur.execute('''
UPDATE Dev_Stats_json SET date_time = "%s"
WHERE Dev_Stats_json.sweep_id= "%s"
''' % (str(datetime.datetime.today()), sweep_pk))
def _get_number_of_executions(self, pdb_pk):
'''
Get the current execution number for each sweep_id from Dev_stats
'''
self.cur.execute('''
SELECT pdb_id_id FROM SWEEPS WHERE SWEEPS.pdb_id_id=%s
''' % (pdb_pk))
number_of_executions = len(self.cur.fetchall())
return number_of_executions
def _update_dev_stats_execution_number(self, sweep_pk, number_of_executions):
'''
Update the execution number for each sweep_id and enter in Dev_stats
table
'''
self.cur.execute('''
UPDATE Dev_Stats_json SET execution_number = "%s"
WHERE Dev_Stats_json.sweep_id="%s"
''' % (number_of_executions, sweep_pk))
def _update_dev_stats_dials_version(self, sweep_pk, dials_version):
'''
Update the dials version for each sweep_id
'''
self.cur.execute('''
UPDATE Dev_Stats_json SET dials_version ="%s"
WHERE Dev_Stats_json.sweep_id="%s"
''' % (dials_version, sweep_pk))
def _update_dev_stats(self, pdb_pk, sweep_pk, dials_version):
'''
Update some statistic metadata for each sweep_id
'''
self._insert_into_dev_stats(sweep_pk)
self._update_dev_stats_date_time(sweep_pk)
number_of_executions = self._get_number_of_executions(pdb_pk)
self._update_dev_stats_execution_number(sweep_pk, number_of_executions)
self._update_dev_stats_dials_version(sweep_pk, dials_version)
def _update_sweep_and_dev_stats(self, pdb_id, pdb_pk, wavelength, statistics, dials_version):
'''
Update all the information for a sweep with wavelength and statistics
'''
# Create a new sweep ID and get the sweep database id
self._insert_or_ignore_into_sweeps(pdb_id)
sweep_pk = self._select_id_from_sweeps(pdb_pk)
# Add the statistics as columns
for name in stat_name_list:
self._insert_into_sweep_id(name, sweep_pk)
# Update the wavelength of the sweep
self._update_wavelength(sweep_pk, wavelength)
# For each statistic, enter into the database
for stat, name in processing_statistic_name_mapping.iteritems():
if stat in statistics:
assert len(statistics[stat]) in [1, 3]
if len(statistics[stat]) == 3:
overall, low, high = statistics[stat]
else:
overall, low, high = statistics[stat][0], None, None
self._update_stats(processing_statistic_name_mapping[stat], overall, low, high, sweep_pk)
# Update the dev stats stuff
self._update_dev_stats(pdb_pk, sweep_pk, dials_version)
def _update_data_type(self, data_type, pdb_pk):
'''
Update the data type for the PDB ID (e.g. SAD, MAD, MR)
'''
self.cur.execute('''
UPDATE PDB_id SET
data_type = ? WHERE id = ?
''', (data_type, pdb_pk))
def _commit(self):
'''
Commit changes back to the database
'''
self.handle.commit()
def _is_sad_mad_or_mr(self, data):
'''
Decide if data is SAD, MAD or MR
'''
crystals = data['_crystals']
data_type = None
for name in crystals.iterkeys():
wavelengths = crystals[name]['_wavelengths'].keys()
if 'NATIVE' in wavelengths:
assert data_type is None or data_type == 'MR'
data_type = 'MR'
elif 'SAD' in wavelengths:
assert data_type is None or data_type == 'SAD'
data_type = 'SAD'
elif all(w.startswith('WAVE') for w in wavelengths):
assert data_type is None or data_type == 'MAD'
data_type = 'MAD'
else:
return None
return data_type
def _parse_xia2_sad(self, pdb_id, pdb_pk, data, dials_version):
'''
Parse XIA2 SAD Data
'''
# Loop through all the crystals
crystals = data['_crystals']
for crystal_name in crystals.iterkeys():
# Get statistics and wavelengths
crystal = crystals[crystal_name]
if not '_scaler' in crystal or crystal['_scaler'] is None:
continue
scaler = crystal['_scaler']
scalr_statistics = scaler['_scalr_statistics']
wavelengths = crystal['_wavelengths']
# Get the statistics and wavelength for the sweep
result = scalr_statistics['["AUTOMATIC", "%s", "SAD"]' % crystal_name]
wavelength = wavelengths['SAD']['_wavelength']
# Update the statistics
self._update_sweep_and_dev_stats(pdb_id, pdb_pk, wavelength, result, dials_version)
# Update the data type
self._update_data_type("SAD", pdb_pk)
print 'SAD data input for %s completed.' % (pdb_id)
def _parse_xia2_mad(self, pdb_id, pdb_pk, data, dials_version):
'''
Parse XIA2 MAD Data
'''
# Loop through all the crystals
crystals = data['_crystals']
for crystal_name in crystals.iterkeys():
# Get statistics and wavelengths
crystal = crystals[crystal_name]
if not '_scaler' in crystal or crystal['_scaler'] is None:
continue
scaler = crystal['_scaler']
scalr_statistics = scaler['_scalr_statistics']
wavelengths = crystal['_wavelengths']
# Loop through the wavelengths
for wave in range(1, len(scalr_statistics.keys())+1):
# Get the statistics and wavelength for the sweep
result = scalr_statistics['["AUTOMATIC", "%s", "WAVE%d"]' % (crystal_name, wave)]
wavelength = wavelengths['WAVE%d' % wave]['_wavelength']
# Update the statistics
self._update_sweep_and_dev_stats(pdb_id, pdb_pk, wavelength, result, dials_version)
# Update the data type
self._update_data_type("MAD", pdb_pk)
print 'MAD data input for %s completed.' % (pdb_id)
def _parse_xia2_mr(self, pdb_id, pdb_pk, data, dials_version):
'''
Parse XIA2 MR data
'''
# Loop through all the crystals
crystals = data['_crystals']
for crystal_name in crystals.iterkeys():
# Get statistics and wavelengths
crystal = crystals[crystal_name]
if not '_scaler' in crystal or crystal['_scaler'] is None:
continue
scaler = crystal['_scaler']
scalr_statistics = scaler['_scalr_statistics']
wavelengths = crystal['_wavelengths']
# Get the statistics and wavelength for the sweep
result = scalr_statistics['["AUTOMATIC", "%s", "NATIVE"]' % crystal_name]
wavelength = wavelengths['NATIVE']['_wavelength']
# Update the statistics
self._update_sweep_and_dev_stats(pdb_id, pdb_pk, wavelength, result, dials_version)
# Update the data type
self._update_data_type("MR", pdb_pk)
print 'MR data input for %s completed. ' % (pdb_id)
def _parse_xia2_json(self, pdb_id, filename, dials_version):
'''
Parse a xia2.json file
'''
import json
# Load the XIA2 Json file
data = json.load(open(filename))
# Perform check for SAD, MAD or MR
check = self._is_sad_mad_or_mr(data)
# Select entry for pdb_id
pdb_pk = self._select_id_from_pdb_id(pdb_id)
# Execute function based on data type
if check == 'SAD':
self._parse_xia2_sad(pdb_id, pdb_pk, data, dials_version)
elif check == 'MAD':
self._parse_xia2_mad(pdb_id, pdb_pk, data, dials_version)
elif check == 'MR':
self._parse_xia2_mr(pdb_id, pdb_pk, data, dials_version)
else:
raise RuntimeError('Data needs to be SAD, MAD or MR: found %s' % check)
# Commit changes back to the database
self._commit()
def _parse_xia2_txt(self, filename):
'''
Get the DIALS version
'''
with open(filename) as infile:
for line in infile.readlines():
if line.startswith('DIALS'):
dials_version = line[6:].strip()
return dials_version
raise RuntimeError("Couldn't read DIALS version from %s" % filename)
def add_entry(self, pdb_id, xia2_txt_filename, xia2_json_filename):
'''
Add the xia2 entry
'''
# Parse the xia2.txt
dials_version = self._parse_xia2_txt(xia2_txt_filename)
# Parse the xia2 json file
self._parse_xia2_json(pdb_id, xia2_json_filename, dials_version)
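# Minimal usage sketch (hypothetical paths; assumes an sqlite3 handle whose
# schema was already created by metrix_db.initialiser):
# import sqlite3
# parser = XIA2Parser(sqlite3.connect("metrix.db"))
# parser.add_entry("1abc", "xia2.txt", "xia2.json")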
| {
"content_hash": "02aaea4d28b66789223802ac7d1aca7f",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 97,
"avg_line_length": 29.99208443271768,
"alnum_prop": 0.623207530570951,
"repo_name": "jmp1985/metrix-database",
"id": "236483903fb714d188d3932e663cfe17c7e564ed",
"size": "11391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xia2_parser.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "42006"
}
],
"symlink_target": ""
} |
from random import choice, shuffle
names_fd = open('names.txt', 'r')
names = map(lambda name: name.strip(), names_fd.readlines())
names_fd.close()
events = []
for i in xrange(3):
events.append("friends")
events.append("hates")
entries = set()
while len(entries) < 8000:
event = choice(events)
person_a = choice(names)
person_b = choice(names)
case = ""
if event == "hates":
case = person_a + " hates " + person_b
else:
case = "friends " + person_a + " " + person_b
entries.add(case)
result = list(entries)
shuffle(result)
for entry in result:
print entry
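# Sample output lines (names here are hypothetical; real ones come from names.txt):
# friends alice bob
# carol hates dave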
| {
"content_hash": "6d57bd95cff1e97b09bd03505362e36e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 60,
"avg_line_length": 15.625,
"alnum_prop": 0.608,
"repo_name": "MAPSuio/spring-challenge16",
"id": "22e0b4ff9919a93b2eae00eb638817ac9a034791",
"size": "625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frenemies/generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Clojure",
"bytes": "4921"
},
{
"name": "Haskell",
"bytes": "2557"
},
{
"name": "Java",
"bytes": "722"
},
{
"name": "Python",
"bytes": "19088"
},
{
"name": "Scheme",
"bytes": "1100"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
} |
from django import http
try:
import settings
XS_SHARING_ALLOWED_ORIGINS = settings.XS_SHARING_ALLOWED_ORIGINS
XS_SHARING_ALLOWED_METHODS = settings.XS_SHARING_ALLOWED_METHODS
XS_SHARING_ALLOWED_HEADERS = settings.XS_SHARING_ALLOWED_HEADERS
except (ImportError, AttributeError): # fall back to permissive defaults
XS_SHARING_ALLOWED_ORIGINS = '*'
XS_SHARING_ALLOWED_METHODS = ['POST','GET','OPTIONS', 'PUT', 'DELETE']
XS_SHARING_ALLOWED_HEADERS = ['Origin', 'Content-Type', 'Accept']
class XsSharing(object):
"""
This middleware enables cross-domain XHR by adding CORS headers to responses:
Access-Control-Allow-Origin: http://foo.example
Access-Control-Allow-Methods: POST, GET, OPTIONS, PUT, DELETE
"""
def process_request(self, request):
if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META:
response = http.HttpResponse()
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join( XS_SHARING_ALLOWED_METHODS )
response['Access-Control-Allow-Headers'] = ",".join( XS_SHARING_ALLOWED_HEADERS )
return response
return None
def process_response(self, request, response):
# Avoid unnecessary work
if response.has_header('Access-Control-Allow-Origin'):
return response
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join( XS_SHARING_ALLOWED_METHODS )
response['Access-Control-Allow-Headers'] = ",".join( XS_SHARING_ALLOWED_HEADERS )
return response
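# The defaults above can be overridden from settings.py (illustrative values):
# XS_SHARING_ALLOWED_ORIGINS = 'http://foo.example'
# XS_SHARING_ALLOWED_METHODS = ['GET', 'POST', 'OPTIONS']
# XS_SHARING_ALLOWED_HEADERS = ['Origin', 'Content-Type', 'Accept']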
| {
"content_hash": "2ebd680aede9102acb1e3cabcb0a5292",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 94,
"avg_line_length": 36.958333333333336,
"alnum_prop": 0.6623449830890643,
"repo_name": "chrispbailey/duat",
"id": "4405644e915737b745281b87b40a953c53514b55",
"size": "1774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "duat/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3858"
},
{
"name": "HTML",
"bytes": "142"
},
{
"name": "JavaScript",
"bytes": "11917"
},
{
"name": "Python",
"bytes": "24689"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ResourceAttributes(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, group=None, name=None, namespace=None, resource=None, subresource=None, verb=None, version=None):
"""
V1ResourceAttributes - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'group': 'str',
'name': 'str',
'namespace': 'str',
'resource': 'str',
'subresource': 'str',
'verb': 'str',
'version': 'str'
}
self.attribute_map = {
'group': 'group',
'name': 'name',
'namespace': 'namespace',
'resource': 'resource',
'subresource': 'subresource',
'verb': 'verb',
'version': 'version'
}
self._group = group
self._name = name
self._namespace = namespace
self._resource = resource
self._subresource = subresource
self._verb = verb
self._version = version
@property
def group(self):
"""
Gets the group of this V1ResourceAttributes.
Group is the API Group of the Resource. \"*\" means all.
:return: The group of this V1ResourceAttributes.
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""
Sets the group of this V1ResourceAttributes.
Group is the API Group of the Resource. \"*\" means all.
:param group: The group of this V1ResourceAttributes.
:type: str
"""
self._group = group
@property
def name(self):
"""
Gets the name of this V1ResourceAttributes.
Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.
:return: The name of this V1ResourceAttributes.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ResourceAttributes.
Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.
:param name: The name of this V1ResourceAttributes.
:type: str
"""
self._name = name
@property
def namespace(self):
"""
Gets the namespace of this V1ResourceAttributes.
Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
:return: The namespace of this V1ResourceAttributes.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""
Sets the namespace of this V1ResourceAttributes.
Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
:param namespace: The namespace of this V1ResourceAttributes.
:type: str
"""
self._namespace = namespace
@property
def resource(self):
"""
Gets the resource of this V1ResourceAttributes.
Resource is one of the existing resource types. \"*\" means all.
:return: The resource of this V1ResourceAttributes.
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""
Sets the resource of this V1ResourceAttributes.
Resource is one of the existing resource types. \"*\" means all.
:param resource: The resource of this V1ResourceAttributes.
:type: str
"""
self._resource = resource
@property
def subresource(self):
"""
Gets the subresource of this V1ResourceAttributes.
Subresource is one of the existing resource types. \"\" means none.
:return: The subresource of this V1ResourceAttributes.
:rtype: str
"""
return self._subresource
@subresource.setter
def subresource(self, subresource):
"""
Sets the subresource of this V1ResourceAttributes.
Subresource is one of the existing resource types. \"\" means none.
:param subresource: The subresource of this V1ResourceAttributes.
:type: str
"""
self._subresource = subresource
@property
def verb(self):
"""
Gets the verb of this V1ResourceAttributes.
Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.
:return: The verb of this V1ResourceAttributes.
:rtype: str
"""
return self._verb
@verb.setter
def verb(self, verb):
"""
Sets the verb of this V1ResourceAttributes.
Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.
:param verb: The verb of this V1ResourceAttributes.
:type: str
"""
self._verb = verb
@property
def version(self):
"""
Gets the version of this V1ResourceAttributes.
Version is the API Version of the Resource. \"*\" means all.
:return: The version of this V1ResourceAttributes.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this V1ResourceAttributes.
Version is the API Version of the Resource. \"*\" means all.
:param version: The version of this V1ResourceAttributes.
:type: str
"""
self._version = version
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
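# Usage sketch (hypothetical values; unset fields remain None in to_dict()):
# attrs = V1ResourceAttributes(group="apps", resource="deployments", verb="get")
# print(attrs.to_dict())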
| {
"content_hash": "a2b6b17c8196f1da792ece8a7b09ec20",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 360,
"avg_line_length": 30.511194029850746,
"alnum_prop": 0.5750275162039868,
"repo_name": "skuda/client-python",
"id": "716746018a732a5954da03aa9870e421d3532a33",
"size": "8194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_resource_attributes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
} |
'''
This module handles rendering the gradebook and its associated functions,
such as downloading grades and rendering individual rows of the table
'''
#Import the app
from app import app
#Import needed flask functions
from flask import g, render_template, redirect, url_for, flash, jsonify, abort
from flask import request, after_this_request, send_file
from flask.ext.login import current_user, login_required
#Import the models we need on these pages
from app.structures.models.user import *
from app.structures.models.gradebook import *
from app.structures.models.course import *
#Import forms for this page
from app.structures.forms import CreateGradeColumnForm, CreateGradebookGroupForm
#Import app helpers
from app.helpers.gradebook import getStudentAssignmentScores, getStudentAuxScores
#Import other python helpers
import tempfile, csv
@app.route('/gradebook/<cid>/<bool:instr>')
@login_required
def viewGradebook(cid, instr):
'''
Function Type: View Function
Template: instructor/gradebook.html
Purpose: Display all of the grades for this course. Allow for creation of
arbitrary submission entries.
Inputs:
cid: The object ID of the course to display
Template Parameters: TODO
Forms Handled: TODO
'''
try:
c = Course.objects.get(id=cid)
if instr and not c in current_user.courseInstructor:
abort(403)
elif not instr and not c in current_user.gradingCourses():
abort(403)
#Get the users for this course
s = User.objects.filter(courseStudent=c)
disableColForm = False
colForm = CreateGradeColumnForm()
colForm.group.choices = [(x.id,x.name) for x in c.gradeBook.auxillaryGrades]
if len(colForm.group.choices) == 0:
colForm.group.choices = [("N/A", "N/A")]
disableColForm = True
s = list(s)
s.sort(key=lambda x:x.username)
uids = [str(u.id) for u in s]
return render_template('common/gradebook.html', course=c, uids=uids,\
groupForm=CreateGradebookGroupForm(),\
colForm=colForm, disableColForm=disableColForm,\
instructor=instr)
except Course.DoesNotExist:
abort(404)
@app.route('/gradebook/download/<cid>/<bool:raw>')
@login_required
def serveGradebook(cid, raw):
try:
course = Course.objects.get(id=cid)
if not course in current_user.courseInstructor:
abort(403)
csvf = tempfile.NamedTemporaryFile()
writer = csv.writer(csvf, delimiter=",", quotechar="\"")
#Put the main headers on
row = ['', 'Assignment']
for a in course.gradeBook.groups():
row += [a.name]*a.getWidth()
writer.writerow(row)
#Put the problem headers on
row = ['', 'Problem']
for c in course.gradeBook.columns():
if c == None:
row += ["None"]
else:
row += [c.name]
row += ['Total Points']
writer.writerow(row)
#Put in the max points row
row = ['', 'Max Points']
for c in course.gradeBook.columns():
if c == None:
row += [0]
else:
row += [c.maxScore]
row += [course.gradeBook.totalPoints()]
writer.writerow(row)
writer.writerow(['Name', 'Username'])
#Do the user rows
students = User.objects.filter(courseStudent=course)
for s in students:
if course.anonymousGrading:
row = [(str(s.firstName) + ' ' + str(s.lastName)), \
(s.username+ ' (' + course.getIdentifier(s.username) + ')')]
else:
row = [(str(s.firstName) + ' ' + str(s.lastName)), s.username]
userCourseScore = 0
scores = getStudentAssignmentScores(course, s)
for a in scores:
if len(a) == 0:
row += ['N/A']
continue
for p in a:
if p == None:
row += [0]
else:
if 'finalTotalScore' in p:
row += [p['finalTotalScore']]
userCourseScore += p['finalTotalScore']
else:
row += [p['rawTotalScore']]
userCourseScore += p['rawTotalScore']
for group in course.gradeBook.auxillaryGrades:
if len(group.columns) == 0:
row += [0]
for col in group.columns:
score = col.scores.setdefault(s.username, None)
if score:
row += [score.totalScore()]
userCourseScore += score.totalScore()
else:
row += [0]
row += [userCourseScore]
writer.writerow(row)
#Be kind rewind the file
csvf.seek(0)
return send_file(csvf,as_attachment=True, attachment_filename='grades.csv', cache_timeout=50)
except Course.DoesNotExist:
abort(404)
def createHighlight(gradeSpec):
if 'highlight' in gradeSpec:
if gradeSpec['highlight'] == 'red':
return "class='danger'"
elif gradeSpec['highlight'] == 'yellow':
return "class='warning'"
elif gradeSpec['highlight'] == 'blue':
return "class='info'"
elif gradeSpec['highlight'] == 'green':
return "class='success'"
else:
return ""
@app.route('/gradebook/<cid>/<bool:instr>/renderGrade', methods=['POST'])
@login_required
def commonRenderGrade(cid, instr):
try:
content = request.get_json()
c = Course.objects.get(id=cid)
u = User.objects.get(id=content['uid'])
#If we are accessing the instructor version check that they are an instr
if instr and not c in current_user.courseInstructor:
abort(403)
if not c in current_user.gradingCourses():
abort(403)
assignmentScores = getStudentAssignmentScores(c, u)
userCourseScore = 0
outString = "<tr>"
# <td>{{username/identifier}}</td>
outString += "<td>"
if instr:
outString += u.username
if c.anonymousGrading:
outString += " (" + c.getIdentifier(u.keyOfUsername()) + ")"
else:
if c.anonymousGrading:
outString += c.getIdentifier(u.keyOfUsername())
else:
outString += u.username
outString += "</td>"
# <td>{{link to problem grading}}</td>
for assignment, a in zip(assignmentScores, c.assignments):
#If this assignment doesn't have any problems we put a blank column in
if len(assignment) == 0:
outString += "<td class='active'></td>"
continue
for problem, p in zip(assignment, a.problems):
if problem == None:
#If there was no submission link to the make blank page
outString += "<td class='active'><a href='"
outString += "#'" #TODO Replace this with an actual link
outString += ">0.00"
outString += "</a></td>"
else:
highlight = createHighlight(problem)
url = url_for('grutorGradeSubmission', pid=p.id, uid=u.id, subnum=p.getSubmissionNumber(u))
if 'finalTotalScore' in problem:
points = problem['finalTotalScore']
userCourseScore += problem['finalTotalScore']
else:
points = problem['rawTotalScore']
userCourseScore += problem['rawTotalScore']
maxPoints = p.gradeColumn.maxScore
cellTemplate = "<td %s><a href='%s'>%.2f</a></td>" % (highlight, url, points)
outString += cellTemplate
for group in c.gradeBook.auxillaryGrades:
if len(group.columns) == 0:
outString += "<td class='active'></td>"
continue
for col in group.columns:
score = col.scores.setdefault(u.keyOfUsername(), None)
if score:
outString += "<td>%.2f</td>" % (score.totalScore())
userCourseScore += score.totalScore()
else:
outString += "<td>%.2f</td>" % (0)
outString += "<td>%.2f</td></tr>" % (userCourseScore)
return jsonify(res=outString)
except (Course.DoesNotExist,User.DoesNotExist):
abort(404)
@app.route('/gradebook/<cid>/addGroup', methods=['POST'])
@login_required
def addGradeGroup(cid):
'''
Function Type: Callback-Redirect Function
Purpose: Add a grade group to the gradebook for a course
Inputs:
cid: The object ID of the course to modify
Forms Handled:
CreateGradebookGroupForm: Reads in the name and creates that group if it
doesn't already exist
'''
try:
c = Course.objects.get(id=cid)
#For security purposes we send anyone who isn't an instructor or
#admin away
if not (g.user.isAdmin or c in current_user.courseInstructor):
abort(403)
grades = c.gradeBook.auxillaryGrades
if request.method == 'POST':
form = CreateGradebookGroupForm(request.form)
if form.validate():
for grade in grades:
if grade.name == form.groupName.data:
flash("Group name already exists")
return redirect(url_for('viewGradebook', cid=cid))
group = GBGroup(form.groupName.data)
group.save()
c.gradeBook.auxillaryGrades.append(group)
c.save()
flash("Added group")
return redirect(url_for('viewGradebook', cid=cid, instr=True))
except Exception as e:
raise e
@app.route('/gradebook/<cid>/addColumn', methods=['POST'])
@login_required
def addGradeColumn(cid):
'''
Function Type: Callback-Redirect Function
Purpose: Add a grade column to a gradebook group for a course
Inputs:
cid: The object ID of the course to modify
Forms Handled:
CreateGradeColumnForm: Reads in the name and creates that column if it
doesn't already exist
'''
try:
c = Course.objects.get(id=cid)
#For security purposes we send anyone who isn't an instructor or
#admin away
if not (g.user.isAdmin or c in current_user.courseInstructor):
abort(403)
if request.method == 'POST':
form = CreateGradeColumnForm(request.form)
form.group.choices = [(unicode(x.id),x.name) for x in c.gradeBook.auxillaryGrades]
if form.validate():
group = GBGroup.objects.get(id=form.group.data)
for c in group.columns:
if c.name == form.name.data:
flash("Column name already exists", "warning")
return redirect(url_for('viewGradebook', cid=cid, instr=True))
col = GBColumn(form.name.data)
col.save()
group.columns.append(col)
group.save()
flash("Group added")
return redirect(url_for('viewGradebook', cid=cid, instr=True))
except Exception as e:
raise e
@app.route('/gradebook/<cid>/edit/<col>/<bool:instr>')
@login_required
def redirectGradebook(cid, col, instr):
'''
Function Type: Callback-Redirect Function
Purpose: Given a gradebook column redirect to either the problem editing page
or the column editing page depending on whether the column is for a problem or just
an arbitrary grade
Inputs:
cid: The object ID for the course that this column is in
col: The object ID of the GBColumn that we are trying to redirect to
Forms Handled: None
'''
c = Course.objects.get(id=cid)
col = GBColumn.objects.get(id=col)
for a in c.assignments:
for p in a.problems:
if p.gradeColumn == col:
return redirect(url_for('grutorGradelistProblem', pid=p.id))
return redirect(url_for('editAuxillaryGrades', cid=cid, instr=instr, col=col.id))
#
# We are lumping the gradebook column in here with the gradebook table
#
@app.route('/gradebook/<cid>/<bool:instr>/<col>')
@login_required
def editAuxillaryGrades(cid, instr, col):
'''
Function Type: View Function
Template: instructor/editcolumn.html
Purpose: Allows the grutor to edit one column of the gradebook manually
Inputs:
cid: The object ID of the course to authenticate the grader
col: The object ID of the column to be edited
Template Parameters: TODO
'''
try:
course = Course.objects.get(id=cid)
column = GBColumn.objects.get(id=col)
if instr and not course in current_user.courseInstructor:
abort(403)
elif not instr and not course in current_user.gradingCourses():
abort(403)
users = User.objects.filter(courseStudent=course)
for u in users:
if not u.keyOfUsername() in column.scores:
grade = GBGrade()
grade.scores['score'] = 0
grade.save()
column.scores[u.keyOfUsername()] = grade
column.save()
return render_template("common/auxillaryGrades.html", course = course, col=column, users=users, instructor=instr)
except (Course.DoesNotExist, GBColumn.DoesNotExist):
abort(404)
@app.route('/gradebook/<cid>/<col>/save', methods=['POST'])
@login_required
def saveGradeColumn(cid,col):
try:
course = Course.objects.get(id=cid)
column = GBColumn.objects.get(id=col)
if not (course in current_user.gradingCourses() or current_user.isAdmin):
return jsonify(res=False)
content = request.get_json()
column.maxScore = content['maxScore']
for id in content['scores']:
u = User.objects.get(id=id)
column.scores[u.keyOfUsername()].scores['score'] = content['scores'][id]
column.scores[u.keyOfUsername()].save()
column.save()
return jsonify(res=True)
except Exception as e:
return jsonify(res=False, exeption=str(e))
| {
"content_hash": "0d3985cddf00eb2800f0baac8c77594d",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 117,
"avg_line_length": 30.569411764705883,
"alnum_prop": 0.6428571428571429,
"repo_name": "noellekimiko/HMC-Grader",
"id": "52136a338e0e94fd32832fe3441c4056e052a654",
"size": "13008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/userViews/common/gradeBook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "99385"
},
{
"name": "HTML",
"bytes": "679813"
},
{
"name": "Java",
"bytes": "688247"
},
{
"name": "JavaScript",
"bytes": "163008"
},
{
"name": "Makefile",
"bytes": "4561"
},
{
"name": "Perl",
"bytes": "60"
},
{
"name": "Prolog",
"bytes": "23764"
},
{
"name": "Python",
"bytes": "506076"
},
{
"name": "Racket",
"bytes": "42672"
},
{
"name": "Shell",
"bytes": "1029"
}
],
"symlink_target": ""
} |
'''
Petitboot Internationalization (i18n)
------------------------------------
Test that Petitboot can correctly display languages other than English.
'''
import time
import pexpect
import unittest
import OpTestConfiguration
from common.OpTestSystem import OpSystemState
import common.OpTestMambo as OpTestMambo
import logging
import OpTestLogger
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
class Petitbooti18n(unittest.TestCase):
def setUp(self):
conf = OpTestConfiguration.conf
self.cv_SYSTEM = conf.system()
def runTest(self):
self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT)
log.debug("Test i18n strings appear correctly in Petitboot")
if (isinstance(self.cv_SYSTEM.console, OpTestMambo.MamboConsole)):
raise unittest.SkipTest("Mambo so skipping Language tests")
# Wait a moment for pb-discover to connect
time.sleep(3)
raw_pty = self.cv_SYSTEM.console.get_console()
raw_pty.sendcontrol('l') # refresh the screen
rc = raw_pty.expect(
['Petitboot', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
raw_pty.send("l") # key press L
rc = raw_pty.expect(
['Deutsch', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(
['English', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(
['Español', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(
['Français', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(
['Italiano', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(['日本語', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(['한국어', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(
['Português/Brasil', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(
['Русский', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(['简体中文', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
rc = raw_pty.expect(['繁體中文', pexpect.TIMEOUT, pexpect.EOF], timeout=5)
# Return to the Petitboot main menu
raw_pty.sendcontrol('l') # refresh the screen to Languages
raw_pty.send("x") # exit to main petitboot menu
raw_pty.sendcontrol('l') # refresh the main petitboot menu
raw_pty.sendcontrol('u') # clear from cursor move cursor
rc = raw_pty.expect(
['x=exit', pexpect.TIMEOUT, pexpect.EOF], timeout=10)
pass
| {
"content_hash": "6509a51c32fa0a7e82afce449a731a88",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 38.44776119402985,
"alnum_prop": 0.6319875776397516,
"repo_name": "open-power/op-test-framework",
"id": "6453c906aeddb39d7e428ad0f8b0879380feafcf",
"size": "3588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testcases/Petitbooti18n.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2080"
},
{
"name": "Python",
"bytes": "1311268"
},
{
"name": "Shell",
"bytes": "71724"
},
{
"name": "Tcl",
"bytes": "18813"
}
],
"symlink_target": ""
} |
"""Binary for evaluating Tensorflow models on the YouTube-8M dataset."""
import time
import numpy
import eval_util
import losses
import ensemble_level_models
import readers
import tensorflow as tf
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
import utils
FLAGS = flags.FLAGS
if __name__ == "__main__":
# Dataset flags.
flags.DEFINE_string("model_checkpoint_path", None,
"The file to load the model files from. ")
flags.DEFINE_string("output_file", "",
"The file to save the predictions to.")
flags.DEFINE_string(
"input_data_patterns", "",
"File globs defining the evaluation dataset in tensorflow.SequenceExample format.")
flags.DEFINE_string(
"input_data_pattern", None,
"File globs for original model input.")
flags.DEFINE_string("feature_names", "predictions", "Name of the feature "
"to use for training.")
flags.DEFINE_string("feature_sizes", "4716", "Length of the feature vectors.")
# Model flags.
flags.DEFINE_string(
"model", "LogisticModel",
"Which architecture to use for the model.")
flags.DEFINE_integer("batch_size", 256,
"How many examples to process per batch.")
# Other flags.
flags.DEFINE_boolean("run_once", True, "Whether to run eval only once.")
flags.DEFINE_integer("top_k", 20, "How many predictions to output per video.")
def format_lines(video_ids, predictions, top_k):
batch_size = len(video_ids)
for video_index in range(batch_size):
top_indices = numpy.argpartition(predictions[video_index], -top_k)[-top_k:]
line = [(class_index, predictions[video_index][class_index])
for class_index in top_indices]
line = sorted(line, key=lambda p: -p[1])
yield video_ids[video_index].decode('utf-8') + "," + " ".join("%i %f" % pair
for pair in line) + "\n"
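# Each yielded line has the form "<video_id>,<label> <score> ...", e.g.
# (hypothetical values): "vid123,4 0.951250 7 0.802137\n".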
def find_class_by_name(name, modules):
"""Searches the provided modules for the named class and returns it."""
modules = [getattr(module, name, None) for module in modules]
return next(a for a in modules if a)
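# E.g. find_class_by_name("LogisticModel", [ensemble_level_models]) returns
# the LogisticModel class from ensemble_level_models, if it is defined there.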
def get_input_data_tensors(reader,
data_pattern,
batch_size=256):
logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
with tf.name_scope("eval_input"):
files = gfile.Glob(data_pattern)
if not files:
raise IOError("Unable to find the evaluation files.")
logging.info("number of evaluation files: " + str(len(files)))
files.sort()
filename_queue = tf.train.string_input_producer(
files, shuffle=False, num_epochs=1)
eval_data = reader.prepare_reader(filename_queue)
return tf.train.batch(
eval_data,
batch_size=batch_size,
capacity=3 * batch_size,
allow_smaller_final_batch=True,
enqueue_many=True)
def build_graph(all_readers,
all_data_patterns,
input_reader,
input_data_pattern,
model,
batch_size=256):
"""Creates the Tensorflow graph for evaluation.
Args:
all_readers: The data file reader. It should inherit from BaseReader.
model: The core model (e.g. logistic or neural net). It should inherit
from BaseModel.
all_data_patterns: glob path to the evaluation data files.
batch_size: How many examples to process at a time.
"""
global_step = tf.Variable(0, trainable=False, name="global_step")
model_input_raw_tensors = []
labels_batch_tensor = None
video_id_batch = None
for reader, data_pattern in zip(all_readers, all_data_patterns):
unused_video_id, model_input_raw, labels_batch, unused_num_frames = (
get_input_data_tensors(
reader,
data_pattern,
batch_size=batch_size))
if labels_batch_tensor is None:
labels_batch_tensor = labels_batch
if video_id_batch is None:
video_id_batch = unused_video_id
model_input_raw_tensors.append(tf.expand_dims(model_input_raw, axis=2))
original_input = None
if input_data_pattern is not None:
unused_video_id, original_input, unused_labels_batch, unused_num_frames = (
get_input_data_tensors(
input_reader,
input_data_pattern,
batch_size=batch_size))
model_input = tf.concat(model_input_raw_tensors, axis=2)
labels_batch = labels_batch_tensor
with tf.name_scope("model"):
result = model.create_model(model_input,
labels=labels_batch,
vocab_size=reader.num_classes,
original_input=original_input,
is_training=False)
predictions = result["predictions"]
tf.add_to_collection("global_step", global_step)
tf.add_to_collection("predictions", predictions)
tf.add_to_collection("input_batch", model_input)
tf.add_to_collection("video_id_batch", video_id_batch)
tf.add_to_collection("labels", tf.cast(labels_batch, tf.float32))
def inference_loop(video_id_batch, prediction_batch, label_batch,
saver, out_file_location):
top_k = FLAGS.top_k
with tf.Session() as sess, gfile.Open(out_file_location, "w+") as out_file:
checkpoint = FLAGS.model_checkpoint_path
if checkpoint:
logging.info("Loading checkpoint for eval: " + checkpoint)
saver.restore(sess, checkpoint)
global_step_val = checkpoint.split("/")[-1].split("-")[-1]
else:
logging.info("No checkpoint file found.")
return # bail out: there is no checkpoint to restore
sess.run([tf.local_variables_initializer()])
# Start the queue runners.
fetches = [video_id_batch, prediction_batch]
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(
sess, coord=coord, daemon=True,
start=True))
logging.info("enter eval_once loop global_step_val = %s. ",
global_step_val)
num_examples_processed = 0
start_time = time.time()
out_file.write("VideoId,LabelConfidencePairs\n")
while not coord.should_stop():
batch_start_time = time.time()
video_id_val, predictions_val = sess.run(fetches)
now = time.time()
num_examples_processed += len(video_id_val)
logging.info("num examples processed: " + str(num_examples_processed) + " elapsed seconds: " + "{0:.2f}".format(now-start_time))
for line in format_lines(video_id_val, predictions_val, top_k):
out_file.write(line)
out_file.flush()
except tf.errors.OutOfRangeError as e:
logging.info('Done with inference. The output file was written to ' + out_file_location)
except Exception as e: # pylint: disable=broad-except
logging.info("Unexpected exception: " + str(e))
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def inference():
tf.set_random_seed(0) # for reproducibility
with tf.Graph().as_default():
# convert feature_names and feature_sizes to lists of values
feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
FLAGS.feature_names, FLAGS.feature_sizes)
# prepare a reader for each single model prediction result
all_readers = []
all_patterns = FLAGS.input_data_patterns
all_patterns = map(lambda x: x.strip(), all_patterns.strip().strip(",").split(","))
for i in xrange(len(all_patterns)):
reader = readers.EnsembleReader(
feature_names=feature_names, feature_sizes=feature_sizes)
all_readers.append(reader)
input_reader = None
input_data_pattern = None
if FLAGS.input_data_pattern is not None:
input_reader = readers.EnsembleReader(
feature_names=["input"], feature_sizes=[1024+128])
input_data_pattern = FLAGS.input_data_pattern
model = find_class_by_name(FLAGS.model, [ensemble_level_models])()
if FLAGS.input_data_patterns == "":
raise IOError("'input_data_patterns' was not specified. " +
"Nothing to evaluate.")
build_graph(
all_readers=all_readers,
all_data_patterns=all_patterns,
input_reader=input_reader,
input_data_pattern=input_data_pattern,
model=model,
batch_size=FLAGS.batch_size)
logging.info("built evaluation graph")
video_id_batch = tf.get_collection("video_id_batch")[0]
prediction_batch = tf.get_collection("predictions")[0]
label_batch = tf.get_collection("labels")[0]
saver = tf.train.Saver(tf.global_variables())
inference_loop(video_id_batch, prediction_batch, label_batch,
saver, FLAGS.output_file)
def main(unused_argv):
logging.set_verbosity(tf.logging.INFO)
print("tensorflow version: %s" % tf.__version__)
inference()
if __name__ == "__main__":
app.run()
| {
"content_hash": "e8ef1df525017589e2720df52d183759",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 136,
"avg_line_length": 35.719367588932805,
"alnum_prop": 0.6408100033196857,
"repo_name": "wangheda/youtube-8m",
"id": "31d8babf6a47f6fc36d0070d244d424e2c424aa2",
"size": "9584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "youtube-8m-ensemble/inference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1360297"
},
{
"name": "Python",
"bytes": "1614517"
},
{
"name": "Shell",
"bytes": "372502"
},
{
"name": "Vim script",
"bytes": "443"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class Manifest(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id: str=None, plan_id: str=None, service_id: str=None, manifest_type: str=None, manifest_content: str=None, endpoints: object=None):
"""
Manifest - a model defined in Swagger
:param id: The id of this Manifest.
:type id: str
:param plan_id: The plan_id of this Manifest.
:type plan_id: str
:param service_id: The service_id of this Manifest.
:type service_id: str
:param manifest_type: The manifest_type of this Manifest.
:type manifest_type: str
:param manifest_content: The manifest_content of this Manifest.
:type manifest_content: str
:param endpoints: The endpoints of this Manifest.
:type endpoints: object
"""
self.swagger_types = {
'id': str,
'plan_id': str,
'service_id': str,
'manifest_type': str,
'manifest_content': str,
'endpoints': object
}
self.attribute_map = {
'id': 'id',
'plan_id': 'plan_id',
'service_id': 'service_id',
'manifest_type': 'manifest_type',
'manifest_content': 'manifest_content',
'endpoints': 'endpoints'
}
self._id = id
self._plan_id = plan_id
self._service_id = service_id
self._manifest_type = manifest_type
self._manifest_content = manifest_content
self._endpoints = endpoints
@classmethod
def from_dict(cls, dikt) -> 'Manifest':
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Manifest of this Manifest.
:rtype: Manifest
"""
return deserialize_model(dikt, cls)
@property
def id(self) -> str:
"""
Gets the id of this Manifest.
An identifier used to correlate this manifest with the selected plan and service. This MUST be globally unique within a platform marketplace. MUST be a non-empty string. Using a GUID is RECOMMENDED.
:return: The id of this Manifest.
:rtype: str
"""
return self._id
@id.setter
def id(self, id: str):
"""
Sets the id of this Manifest.
An identifier used to correlate this manifest with the selected plan and service. This MUST be globally unique within a platform marketplace. MUST be a non-empty string. Using a GUID is RECOMMENDED.
:param id: The id of this Manifest.
:type id: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`")
self._id = id
@property
def plan_id(self) -> str:
"""
Gets the plan_id of this Manifest.
The plan the manifest should be associated with
:return: The plan_id of this Manifest.
:rtype: str
"""
return self._plan_id
@plan_id.setter
def plan_id(self, plan_id: str):
"""
Sets the plan_id of this Manifest.
The plan the manifest should be associated with
:param plan_id: The plan_id of this Manifest.
:type plan_id: str
"""
if plan_id is None:
raise ValueError("Invalid value for `plan_id`, must not be `None`")
self._plan_id = plan_id
@property
def service_id(self) -> str:
"""
Gets the service_id of this Manifest.
The service type (id) the manifest should be associated with
:return: The service_id of this Manifest.
:rtype: str
"""
return self._service_id
@service_id.setter
def service_id(self, service_id: str):
"""
Sets the service_id of this Manifest.
The service type (id) the manifest should be associated with
:param service_id: The service_id of this Manifest.
:type service_id: str
"""
if service_id is None:
raise ValueError("Invalid value for `service_id`, must not be `None`")
self._service_id = service_id
@property
def manifest_type(self) -> str:
"""
Gets the manifest_type of this Manifest.
The type of system that the manifest targets
:return: The manifest_type of this Manifest.
:rtype: str
"""
return self._manifest_type
@manifest_type.setter
def manifest_type(self, manifest_type: str):
"""
Sets the manifest_type of this Manifest.
The type of system that the manifest targets
:param manifest_type: The manifest_type of this Manifest.
:type manifest_type: str
"""
if manifest_type is None:
raise ValueError("Invalid value for `manifest_type`, must not be `None`")
self._manifest_type = manifest_type
@property
def manifest_content(self) -> str:
"""
Gets the manifest_content of this Manifest.
The manifest content
:return: The manifest_content of this Manifest.
:rtype: str
"""
return self._manifest_content
@manifest_content.setter
def manifest_content(self, manifest_content: str):
"""
Sets the manifest_content of this Manifest.
The manifest content
:param manifest_content: The manifest_content of this Manifest.
:type manifest_content: str
"""
if manifest_content is None:
raise ValueError("Invalid value for `manifest_content`, must not be `None`")
self._manifest_content = manifest_content
@property
def endpoints(self) -> object:
"""
Gets the endpoints of this Manifest.
A set of endpoints that the service instance exposes. This includes APIs and UIs.
:return: The endpoints of this Manifest.
:rtype: object
"""
return self._endpoints
@endpoints.setter
def endpoints(self, endpoints: object):
"""
Sets the endpoints of this Manifest.
A set of endpoints that the service instance exposes. This includes APIs and UIs.
:param endpoints: The endpoints of this Manifest.
:type endpoints: object
"""
self._endpoints = endpoints
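# Usage sketch (hypothetical payload; relies on util.deserialize_model):
# manifest = Manifest.from_dict({'id': 'abc-123', 'plan_id': 'plan-1',
# 'service_id': 'svc-1', 'manifest_type': 'docker-compose',
# 'manifest_content': 'version: "2"'})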
| {
"content_hash": "9aee620031d99c90a75b53a0b88a6639",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 207,
"avg_line_length": 30.757009345794394,
"alnum_prop": 0.5928289273776968,
"repo_name": "EduJGURJC/elastest-service-manager",
"id": "03ed71e57a03947f6407d2d418596e2b2595ab5a",
"size": "6599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/esm/models/manifest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "220094"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
} |
import unittest
def get_tests():
return full_suite()
def full_suite():
from .msgpack_ext import MsgPackTestCase
#from .nodes import NodeTestCase
msgpack_suite = unittest.TestLoader().loadTestsFromTestCase(MsgPackTestCase)
#node_suite = unittest.TestLoader().loadTestsFromTestCase(NodeTestCase)
return unittest.TestSuite([msgpack_suite,])
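# To run the suite directly (a minimal sketch):
# unittest.TextTestRunner().run(full_suite())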
| {
"content_hash": "dd0c8b85324af518d46177c8cd9f1648",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 24.533333333333335,
"alnum_prop": 0.7472826086956522,
"repo_name": "meteotest/hurray-py",
"id": "bfe8cad90c4761832f1b199567571fa59a06677a",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "87161"
}
],
"symlink_target": ""
} |
import glob
import lxml.etree
import os
import re
from checker.base import BakeryTestCase as TestCase
from fontaine.builder import Director, Builder
from fontaine.cmap import library
COPYRIGHT_REGEX = re.compile(r'Copyright.*?20\d{2}.*', re.U | re.I)
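# Matches notices such as "Copyright (c) 2014, The Example Project Authors"
# (a hypothetical example), case-insensitively, anywhere in the text.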
def get_test_subset_function(value):
def function(self):
self.assertEqual(value, 100)
function.tags = ['note']
return function
class FontaineTest(TestCase):
targets = ['upstream', 'result']
tool = 'PyFontaine'
name = __name__
path = '.'
def setUp(self):
# This test uses custom collections for pyFontaine call
# We should save previous state of collection and then
# restore it in tearDown
self.old_collections = library.collections
def tearDown(self):
library.collections = self.old_collections
@classmethod
def __generateTests__(cls):
        pattern = re.compile(r'[\W_]+')
library.collections = ['subsets']
tree = Director().construct_tree([cls.path])
contents = Builder.xml_(tree).doc.toprettyxml(indent=" ")
docroot = lxml.etree.fromstring(contents)
for orth in docroot.xpath('//orthography'):
value = int(orth.xpath('./percentCoverage/text()')[0])
common_name = orth.xpath('./commonName/text()')[0]
shortname = pattern.sub('', common_name)
exec 'cls.test_charset_%s = get_test_subset_function(%s)' % (shortname, value)
exec 'cls.test_charset_%s.__func__.__doc__ = "Is %s covered 100%%?"' % (shortname, common_name)
class ConsistencyTest(TestCase):
targets = ['consistency']
tool = 'Regex'
name = __name__
path = '.'
def setUp(self):
self.ufo_dirs = []
self.l = len(self.path)
for root, dirs, files in os.walk(self.path):
for d in dirs:
fullpath = os.path.join(root, d)
if os.path.splitext(fullpath)[1].lower() == '.ufo':
self.ufo_dirs.append(fullpath)
def test_copyright_notices_same_across_family(self):
""" Are all copyright notices the same in all styles? """
copyright = None
for ufo_folder in self.ufo_dirs:
current_notice = self.lookup_copyright_notice(ufo_folder)
if current_notice is None:
continue
if copyright is not None and current_notice != copyright:
self.fail('"%s" != "%s"' % (current_notice, copyright))
break
copyright = current_notice
def grep_copyright_notice(self, contents):
match = COPYRIGHT_REGEX.search(contents)
if match:
return match.group(0).strip(',\r\n')
return
def lookup_copyright_notice(self, ufo_folder):
current_path = ufo_folder
try:
contents = open(os.path.join(ufo_folder, 'fontinfo.plist')).read()
copyright = self.grep_copyright_notice(contents)
if copyright:
return copyright
except (IOError, OSError):
pass
while os.path.realpath(self.path) != current_path:
# look for all text files inside folder
# read contents from them and compare with copyright notice
# pattern
files = glob.glob(os.path.join(current_path, '*.txt'))
files += glob.glob(os.path.join(current_path, '*.ttx'))
for filename in files:
with open(os.path.join(current_path, filename)) as fp:
match = COPYRIGHT_REGEX.search(fp.read())
if not match:
continue
return match.group(0).strip(',\r\n')
current_path = os.path.join(current_path, '..') # go up
current_path = os.path.realpath(current_path)
return
import robofab.world
import robofab.objects
class UfoOpenTest(TestCase):
targets = ['upstream']
tool = 'Robofab'
name = __name__
path = '.'
def setUp(self):
self.font = robofab.world.OpenFont(self.path)
# You can use ipdb here to interactively develop tests!
        # Uncomment the next line, then at the iPython prompt: print(self.path)
# import ipdb; ipdb.set_trace()
# def test_success(self):
# """ This test succeeded """
# self.assertTrue(True)
#
# def test_failure(self):
# """ This test failed """
# self.assertTrue(False)
#
# def test_error(self):
# """ Unexpected error """
# 1 / 0
# self.assertTrue(False)
def test_it_exists(self):
""" Does this UFO path exist? """
self.assertEqual(os.path.exists(self.path), True)
def test_is_folder(self):
""" Is this UFO really a folder?"""
self.assertEqual(os.path.isdir(self.path), True)
def test_is_ended_ufo(self):
""" Does this font file's name end with '.ufo'?"""
self.assertEqual(self.path.lower().endswith('.ufo'), True)
# @tags('required')
def test_is_A(self):
""" Does this font have a glyph named 'A'?"""
self.assertTrue('A' in self.font)
def test_is_A_a_glyph_instance(self):
""" Is this font's property A an instance of an RGlyph object? """
if 'A' in self.font:
a = self.font['A']
else:
a = None
self.assertIsInstance(a, robofab.objects.objectsRF.RGlyph)
    def test_is_fsType_eq_0(self):
"""Is the OS/2 table fsType set to 0?"""
desiredFsType = [0]
self.assertEqual(self.font.info.openTypeOS2Type, desiredFsType)
# TODO check if this is a good form of test
def has_character(self, unicodeString):
"""Does this font include a glyph for the given unicode character?"""
# TODO check the glyph has at least 1 contour
character = unicodeString[0]
glyph = None
if character in self.font:
glyph = self.font[character]
self.assertIsInstance(glyph, robofab.objects.objectsRF.RGlyph)
def test_has_rupee(self):
u"""Does this font include a glyph for ₹, the Indian Rupee Sign codepoint?"""
        self.has_character(u'₹')
| {
"content_hash": "3d124b446f770c6a667ef2cc0ef15103",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 107,
"avg_line_length": 33.62162162162162,
"alnum_prop": 0.5860128617363344,
"repo_name": "vitalyvolkov/fontbakery",
"id": "1e2b4daae69f0ea9707b5ce24e0749c671b91d1d",
"size": "6927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checker/tests/upstream_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8114"
},
{
"name": "JavaScript",
"bytes": "6468"
},
{
"name": "Python",
"bytes": "312438"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
} |
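`FontaineTest.__generateTests__` above leans on Python 2 `exec` statements to attach one generated method per orthography. The same idea ports to Python 3 with `setattr`; a hedged sketch with illustrative data (the orthography names and coverage values are made up, and this bypasses pyFontaine entirely):

import re
import unittest

def make_coverage_test(common_name, value):
    def test(self):
        self.assertEqual(value, 100)
    test.__doc__ = 'Is %s covered 100%%?' % common_name
    return test

class CoverageTest(unittest.TestCase):
    pass

pattern = re.compile(r'[\W_]+')
for common_name, value in [('Basic Latin', 100), ('Greek', 73)]:
    shortname = pattern.sub('', common_name)
    # Attach the generated method under a sanitized name; no exec needed.
    setattr(CoverageTest, 'test_charset_%s' % shortname,
            make_coverage_test(common_name, value))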
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.template.defaultfilters import slugify
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for storytemplate in orm.StoryTemplate.objects.all():
translation = storytemplate.storytemplatetranslation_set.all()[0]
storytemplate.slug = slugify(translation.title)
storytemplate.save()
def backwards(self, orm):
for storytemplate in orm.StoryTemplate.objects.all():
storytemplate.slug = ''
storytemplate.save()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'links_to_file': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_geo.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'storybase_geo.location': {
'Meta': {'object_name': 'Location'},
'address': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'address2': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['auth.User']"}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'storybase_geo.place': {
'Meta': {'object_name': 'Place'},
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_geo.Place']", 'null': 'True', 'through': "orm['storybase_geo.PlaceRelation']", 'blank': 'True'}),
'geolevel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'places'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'place_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_geo.placerelation': {
'Meta': {'unique_together': "(('parent', 'child'),)", 'object_name': 'PlaceRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_parent'", 'to': "orm['storybase_geo.Place']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_child'", 'to': "orm['storybase_geo.Place']"})
},
'storybase_help.help': {
'Meta': {'object_name': 'Help'},
'help_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'storybase_story.container': {
'Meta': {'object_name': 'Container'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'storybase_story.section': {
'Meta': {'object_name': 'Section'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sections'", 'blank': 'True', 'through': "orm['storybase_story.SectionAsset']", 'to': "orm['storybase_asset.Asset']"}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_story.Section']", 'null': 'True', 'through': "orm['storybase_story.SectionRelation']", 'blank': 'True'}),
'help': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_help.Help']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.SectionLayout']", 'null': 'True'}),
'root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'section_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['storybase_story.Story']"}),
'template_section': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'template_for'", 'null': 'True', 'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectionasset': {
'Meta': {'object_name': 'SectionAsset'},
'asset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_asset.Asset']"}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Container']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectionlayout': {
'Meta': {'object_name': 'SectionLayout'},
'containers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'layouts'", 'blank': 'True', 'to': "orm['storybase_story.Container']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_story.sectionlayouttranslation': {
'Meta': {'object_name': 'SectionLayoutTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.SectionLayout']"}),
'name': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_story.sectionrelation': {
'Meta': {'object_name': 'SectionRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_parent'", 'to': "orm['storybase_story.Section']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_child'", 'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectiontranslation': {
'Meta': {'unique_together': "(('section', 'language'),)", 'object_name': 'SectionTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Section']"}),
'title': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Location']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'places': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Place']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'related_to'", 'blank': 'True', 'through': "orm['storybase_story.StoryRelation']", 'to': "orm['storybase_story.Story']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'template_story': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'template_for'", 'null': 'True', 'to': "orm['storybase_story.Story']"}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_taxonomy.Category']"})
},
'storybase_story.storyrelation': {
'Meta': {'object_name': 'StoryRelation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'relation_type': ('django.db.models.fields.CharField', [], {'default': "'connected'", 'max_length': '25'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'target'", 'to': "orm['storybase_story.Story']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source'", 'to': "orm['storybase_story.Story']"})
},
'storybase_story.storytemplate': {
'Meta': {'object_name': 'StoryTemplate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']", 'null': 'True', 'blank': 'True'}),
'template_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'time_needed': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'storybase_story.storytemplatetranslation': {
'Meta': {'object_name': 'StoryTemplateTranslation'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'story_template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.StoryTemplate']"}),
'tag_line': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'title': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_story.storytranslation': {
'Meta': {'unique_together': "(('story', 'language'),)", 'object_name': 'StoryTranslation'},
'call_to_action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'connected_prompt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_taxonomy.category': {
'Meta': {'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_taxonomy.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'storybase_taxonomy.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'tag_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_taxonomy.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'storybase_taxonomy_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['storybase_taxonomy.Tag']"})
},
'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['storybase_story']
symmetrical = True
| {
"content_hash": "66be9b1f7ac69c5c530a38fc6fa87170",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 268,
"avg_line_length": 87.97546012269939,
"alnum_prop": 0.5591701534170154,
"repo_name": "denverfoundation/storybase",
"id": "aa550e687dbf03bae27b12c3e09d7fffd81da09c",
"size": "28704",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/storybase_story/migrations/0032_storytemplate_slugs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "285649"
},
{
"name": "Cucumber",
"bytes": "176820"
},
{
"name": "HTML",
"bytes": "286197"
},
{
"name": "JavaScript",
"bytes": "1623541"
},
{
"name": "Makefile",
"bytes": "1006"
},
{
"name": "Python",
"bytes": "3020016"
},
{
"name": "Shell",
"bytes": "23932"
}
],
"symlink_target": ""
} |
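The migration above backfills `slug` from each template's first translation title. For reference, `django.template.defaultfilters.slugify` is a thin wrapper around `django.utils.text.slugify`, which lowercases, strips punctuation, and hyphenates whitespace (the titles below are examples, not project data):

from django.utils.text import slugify

print(slugify("My First Story Template!"))  # my-first-story-template
print(slugify("  Mixed   CASE  title "))    # mixed-case-title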
from tests.integration.integration_test_case import IntegrationTestCase
class TestQuestionnairePiping(IntegrationTestCase):
def test_given_quotes_in_answer_when_piped_into_page_then_html_escaped_quotes_on_page(self):
# Given
self.launchSurvey('test', 'multiple_piping')
self.post(action='start_questionnaire')
self.post({'address-line-1': '44 hill side'})
self.post(action='save_continue')
# When
self.post({'first-text': 'Joe', 'second-text': 'Bloggs "Junior"'})
self.post(action='save_continue')
# Then
self.get(self.last_url)
self.assertStatusOK()
self.assertInSelector(
'Does <em>Joe Bloggs "Junior"</em> live at <em>44 hill side</em>',
id='container-multiple-piping-answer',
)
def test_given_backslash_in_answer_when_piped_into_page_then_backslash_on_page(self):
# Given
self.launchSurvey('test', 'multiple_piping')
self.post(action='start_questionnaire')
self.post({'address-line-1': '44 hill side'})
self.post(action='save_continue')
# When
self.post({'first-text': 'Joe', 'second-text': 'Bloggs\\John Doe'})
self.post(action='save_continue')
# Then
self.get(self.last_url)
self.assertStatusOK()
self.assertInSelector(
'Joe Bloggs\\John Doe',
id='container-multiple-piping-answer'
)
def test_answer_piped_into_option(self):
# Given
self.launchSurvey('test', 'multiple_piping')
self.post(action='start_questionnaire')
self.post({'address-line-1': '44 hill side', 'town-city': 'newport'})
self.post(action='save_continue')
# When
self.post({'first-text': 'Joe', 'second-text': 'Bloggs\\John Doe'})
self.post(action='save_continue')
# Then
self.get(self.last_url)
self.assertStatusOK()
self.assertInSelector(
'44 hill side, newport',
id='label-multiple-piping-answer-0'
)
def test_answer_piped_into_option_on_validation_error(self):
"""Regression test to assert that the previous answer is still piped into
the option label on the form it is rendered with a validation error
"""
# Given
self.launchSurvey('test', 'multiple_piping')
self.post(action='start_questionnaire')
self.post({'address-line-1': '44 hill side', 'town-city': 'newport'})
self.post(action='save_continue')
self.post({'first-text': 'Joe', 'second-text': 'Bloggs\\John Doe'})
self.post(action='save_continue')
# When
self.post({})
# Then
self.assertStatusOK()
self.assertInSelector(
'44 hill side, newport',
id='label-multiple-piping-answer-0'
)
| {
"content_hash": "ba3e8474e8314a21280ac8d35a453557",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 96,
"avg_line_length": 35.15853658536585,
"alnum_prop": 0.5962539021852237,
"repo_name": "ONSdigital/eq-survey-runner",
"id": "655dab88d7ebf6f1685779cd231caa66e7f1a3d2",
"size": "2883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/questionnaire/test_questionnaire_piping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "520"
},
{
"name": "HTML",
"bytes": "236859"
},
{
"name": "JavaScript",
"bytes": "423942"
},
{
"name": "Python",
"bytes": "1409591"
},
{
"name": "SCSS",
"bytes": "25858"
},
{
"name": "Shell",
"bytes": "10196"
}
],
"symlink_target": ""
} |
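Both quote tests above depend on user-entered text being HTML-escaped before it is piped into the rendered page. A minimal illustration of that escaping with the standard library (the survey runner's actual template machinery is not shown in this file, so treating `html.escape` as its equivalent is an assumption):

from html import escape

answer = 'Bloggs "Junior"'
# Quotes must reach the HTML source as entities so they cannot break out
# of the surrounding markup; browsers render them back as quotes.
print(escape(answer))  # Bloggs &quot;Junior&quot;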
"""Utility function for installing MathJax javascript library into
your IPython nbextensions directory, for offline use.
Authors:
* Min RK
* Mark Sienkiewicz
* Matthias Bussonnier
To download and install MathJax:
From Python:
>>> from IPython.external.mathjax import install_mathjax
>>> install_mathjax()
From the command line:
$ python -m IPython.external.mathjax
To a specific location:
$ python -m IPython.external.mathjax -i /usr/share/
will install mathjax to /usr/share/mathjax
To install MathJax from a file you have already downloaded:
$ python -m IPython.external.mathjax mathjax-xxx.tar.gz
$ python -m IPython.external.mathjax mathjax-xxx.zip
It will not install MathJax if it is already there. Use -r to
replace the existing copy of MathJax.
To find the directory where IPython would like MathJax installed:
$ python -m IPython.external.mathjax -d
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import argparse
import os
import shutil
import sys
import tarfile
import zipfile
from IPython.utils.path import get_ipython_dir
try:
from urllib.request import urlopen # Py 3
except ImportError:
from urllib2 import urlopen
#-----------------------------------------------------------------------------
#
#-----------------------------------------------------------------------------
# Where mathjax will be installed
nbextensions = os.path.join(get_ipython_dir(), 'nbextensions')
default_dest = os.path.join(nbextensions, 'mathjax')
# Test for access to install mathjax
def prepare_dest(dest, replace=False):
"""prepare the destination folder for mathjax install
Returns False if mathjax appears to already be installed and there is nothing to do,
True otherwise.
"""
parent = os.path.abspath(os.path.join(dest, os.path.pardir))
if not os.path.exists(parent):
os.makedirs(parent)
if os.path.exists(dest):
if replace:
print("removing existing MathJax at %s" % dest)
shutil.rmtree(dest)
return True
else:
mathjax_js = os.path.join(dest, 'MathJax.js')
if not os.path.exists(mathjax_js):
raise IOError(
"%s exists, but does not contain MathJax.js" % dest)
print("%s already exists" % mathjax_js)
return False
else:
return True
def extract_tar(fd, dest):
"""extract a tarball from filelike `fd` to destination `dest`"""
# use 'r|gz' stream mode, because socket file-like objects can't seek:
tar = tarfile.open(fileobj=fd, mode='r|gz')
# The first entry in the archive is the top-level dir
topdir = tar.firstmember.path
# extract the archive (contains a single directory) to the destination
# directory
parent = os.path.abspath(os.path.join(dest, os.path.pardir))
tar.extractall(parent)
# it will be mathjax-MathJax-<sha>, rename to just mathjax
os.rename(os.path.join(parent, topdir), dest)
def extract_zip(fd, dest):
"""extract a zip file from filelike `fd` to destination `dest`"""
z = zipfile.ZipFile(fd, 'r')
# The first entry in the archive is the top-level dir
topdir = z.namelist()[0]
# extract the archive (contains a single directory) to the static/
# directory
parent = os.path.abspath(os.path.join(dest, os.path.pardir))
z.extractall(parent)
# it will be mathjax-MathJax-<sha>, rename to just mathjax
os.rename(os.path.join(parent, topdir), dest)
def install_mathjax(tag='2.4.0', dest=default_dest, replace=False, file=None, extractor=extract_tar):
"""Download and/or install MathJax for offline use.
This will install mathjax to the nbextensions dir in your IPYTHONDIR.
MathJax is a ~15MB download, and ~150MB installed.
Parameters
----------
replace : bool [False]
Whether to remove and replace an existing install.
dest : str [IPYTHONDIR/nbextensions/mathjax]
Where to install mathjax
tag : str ['2.4.0']
Which tag to download. Default is '2.4.0', the current stable release,
but alternatives include 'v1.1a' and 'master'.
    file : file-like object [default: the tarball content of https://github.com/mathjax/MathJax/archive/<tag>.tar.gz]
File handle from which to untar/unzip/... mathjax
extractor : function
Method to use to untar/unzip/... `file`
"""
try:
anything_to_do = prepare_dest(dest, replace)
except OSError as e:
print("ERROR %s, require write access to %s" % (e, dest))
return 1
else:
if not anything_to_do:
return 0
if file is None:
# download mathjax
mathjax_url = "https://github.com/mathjax/MathJax/archive/%s.tar.gz" % tag
print("Downloading mathjax source from %s" % mathjax_url)
response = urlopen(mathjax_url)
file = response.fp
print("Extracting to %s" % dest)
extractor(file, dest)
return 0
def main():
parser = argparse.ArgumentParser(
description="""Install mathjax from internet or local archive""",
)
parser.add_argument(
'-i',
'--install-dir',
default=nbextensions,
help='custom installation directory. Mathjax will be installed in here/mathjax')
parser.add_argument(
'-d',
'--print-dest',
action='store_true',
help='print where mathjax would be installed and exit')
parser.add_argument(
'-r',
'--replace',
action='store_true',
help='Whether to replace current mathjax if it already exists')
parser.add_argument('filename',
help="the local tar/zip-ball filename containing mathjax",
nargs='?',
metavar='filename')
pargs = parser.parse_args()
dest = os.path.join(pargs.install_dir, 'mathjax')
if pargs.print_dest:
print(dest)
return
# remove/replace existing mathjax?
replace = pargs.replace
# do it
if pargs.filename:
fname = pargs.filename
# automatically detect zip/tar - could do something based
# on file content, but really not cost-effective here.
if fname.endswith('.zip'):
extractor = extract_zip
else:
extractor = extract_tar
# do it
return install_mathjax(file=open(fname, "rb"), replace=replace, extractor=extractor, dest=dest)
else:
return install_mathjax(replace=replace, dest=dest)
if __name__ == '__main__':
sys.exit(main())
__all__ = ['install_mathjax', 'main', 'default_dest']
| {
"content_hash": "5aeb26ee1fe6e07ddeb6049dbf762110",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 103,
"avg_line_length": 29.775,
"alnum_prop": 0.6059333893087042,
"repo_name": "mattvonrocketstein/smash",
"id": "daf4744cd54c3c0e46709a01c041831af0ebccd8",
"size": "7164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/ipy3x/external/mathjax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
} |
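A note on `extract_tar` above: mode `'r|gz'` reads the archive as a forward-only stream, which is what lets it consume a non-seekable socket file-object. A self-contained demonstration of stream-mode reading (the in-memory archive is built purely for illustration):

import io
import tarfile

# Build a tiny gzipped tarball in memory.
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w:gz') as tar:
    data = b'hello'
    info = tarfile.TarInfo(name='pkg-abc123/README')
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))

# Re-read it in stream mode ('r|gz'), which never seeks -- the same
# constraint install_mathjax works under when fed a socket.
buf.seek(0)
with tarfile.open(fileobj=buf, mode='r|gz') as tar:
    member = tar.next()
    print(member.name)  # pkg-abc123/README; the top-level dir is 'pkg-abc123'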
import _plotly_utils.basevalidators
class DtickValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self,
plotly_name="dtick",
parent_name="scatterpolargl.marker.colorbar",
**kwargs,
):
super(DtickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"tickmode": "linear"}),
**kwargs,
)
| {
"content_hash": "9facd8774fae8474f0b9b0bb25520aa2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 31.11764705882353,
"alnum_prop": 0.5784499054820416,
"repo_name": "plotly/plotly.py",
"id": "0a2c3842632a4c5d1527e7a8c6d6db57154babcc",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolargl/marker/colorbar/_dtick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
from os import path
import re
import subprocess
import sys
KEYNAME = 'paperspace-termux-cli'
VMNAME = 'ps-game'
## todo: generalize to include azure, bokken :D
def fatal_paperspace(error):
sys.exit(f"fatal (paperspace): {error}")
def run_paperspace(args):
    result = subprocess.run(['paperspace'] + args, stdout=subprocess.PIPE)
    parsed = json.loads(result.stdout)
    # The CLI reports failures as a single-element list whose first item
    # carries an 'error' key; successful entries may omit the key entirely.
    if isinstance(parsed, list) and parsed and isinstance(parsed[0], dict):
        error = parsed[0].get('error')
        if error:
            fatal_paperspace(error)
    return parsed
def init_paperspace():
for line in open(path.expanduser('~/dotfiles/private/api/keys')):
        m = re.search(rf'{KEYNAME}\s+(\w+)\s*=\s*(\S+)', line)
if m:
os.environ[m.group(1)] = m.group(2)
return
fatal_paperspace('Unable to find API key')
def get_paperspace_vm_from_name(name=VMNAME):
    for item in run_paperspace(['machines', 'list']):
        if item['name'] == name:
            return item
    fatal_paperspace(f'no machine named {name!r}')
def get_paperspace_vm_id_from_name(name=VMNAME):
return get_paperspace_vm_from_name(name)['id']
def waitfor_paperspace_status(machineId, status):
    # Mirrors the CLI call sketched in the original comment:
    #   paperspace machines waitfor --machineId $VMID --state <status>
    subprocess.run(
        ['paperspace', 'machines', 'waitfor', '--machineId', machineId, '--state', status],
        check=True)
def start_cmd(args):
    init_paperspace()
    vmid = get_paperspace_vm_id_from_name()
    # paperspace machines start --machineId $VMID
    subprocess.run(['paperspace', 'machines', 'start', '--machineId', vmid], check=True)
    waitfor_paperspace_status(vmid, 'ready')
    print("start!")
def stop_cmd(args):
    init_paperspace()
    vmid = get_paperspace_vm_id_from_name()
    # paperspace machines stop --machineId $VMID
    subprocess.run(['paperspace', 'machines', 'stop', '--machineId', vmid], check=True)
    waitfor_paperspace_status(vmid, 'off')
    print("stop!")
def status_cmd(args):
init_paperspace()
vm = get_paperspace_vm_from_name()
    print(vm['state'])  # machines list yields plain dicts, not objects
parser = argparse.ArgumentParser(description='Simple helpers to mess with my VMs')
subparsers = parser.add_subparsers()
start_parser = subparsers.add_parser('start')
start_parser.set_defaults(func=start_cmd)
stop_parser = subparsers.add_parser('stop')
stop_parser.set_defaults(func=stop_cmd)
status_parser = subparsers.add_parser('status', aliases=['st'])
status_parser.set_defaults(func=status_cmd)
args = parser.parse_args()
args.func(args)
| {
"content_hash": "db527140f34a22756f1851f42a1b2bc7",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 82,
"avg_line_length": 28.373333333333335,
"alnum_prop": 0.6837406015037594,
"repo_name": "scottbilas/dotfiles",
"id": "f368847ad7770876715fb0cb09f7c788c2ceccd4",
"size": "2152",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "scripts/paperspace.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AutoHotkey",
"bytes": "865"
},
{
"name": "Batchfile",
"bytes": "430"
},
{
"name": "JavaScript",
"bytes": "7086"
},
{
"name": "Lua",
"bytes": "53818"
},
{
"name": "Perl",
"bytes": "95482"
},
{
"name": "PowerShell",
"bytes": "37956"
},
{
"name": "Python",
"bytes": "172383"
},
{
"name": "Shell",
"bytes": "80975"
},
{
"name": "Vim script",
"bytes": "9611"
}
],
"symlink_target": ""
} |
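The `set_defaults(func=...)` dispatch used above is a common argparse idiom: each subparser stores its handler, and the caller simply invokes `args.func(args)`. A standalone sketch (the `hello` subcommand is illustrative only):

import argparse

def hello_cmd(args):
    print('hello, %s' % args.name)

parser = argparse.ArgumentParser(description='dispatch demo')
subparsers = parser.add_subparsers(dest='command', required=True)
hello_parser = subparsers.add_parser('hello')
hello_parser.add_argument('name')
hello_parser.set_defaults(func=hello_cmd)

args = parser.parse_args(['hello', 'world'])
args.func(args)  # prints: hello, world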
import setuptools
with open("README.rst", "r") as fh:
long_description = fh.read()
namespaces = ['compatibility_lib']
setuptools.setup(
name="compatibility_lib",
version="0.1.11",
author="Cloud Python",
description="A library to get and store the dependency compatibility "
"status data to BigQuery.",
long_description=long_description,
license="Apache-2.0",
include_package_data=True,
url="https://github.com/GoogleCloudPlatform/cloud-opensource-python/tree/"
"master/compatibility_lib",
packages=setuptools.find_packages(),
namespace_packages=namespaces,
classifiers=(
"Intended Audience :: Developers",
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
),
)
| {
"content_hash": "5034a0ace773158114b09cd836cda349",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 29.379310344827587,
"alnum_prop": 0.6549295774647887,
"repo_name": "GoogleCloudPlatform/cloud-opensource-python",
"id": "030867e69e79df070b98f8918858a057d0d02300",
"size": "1427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compatibility_lib/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33245"
},
{
"name": "Dockerfile",
"bytes": "3146"
},
{
"name": "HTML",
"bytes": "58562"
},
{
"name": "JavaScript",
"bytes": "4919"
},
{
"name": "Python",
"bytes": "400736"
},
{
"name": "Shell",
"bytes": "2798"
}
],
"symlink_target": ""
} |
"""
Flip API
Flip # noqa: E501
The version of the OpenAPI document: 3.1
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_flip.configuration import Configuration
class CreateEncodingBody(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'video_id': 'str',
'profile_id': 'str',
'profile_name': 'str'
}
attribute_map = {
'video_id': 'video_id',
'profile_id': 'profile_id',
'profile_name': 'profile_name'
}
def __init__(self, video_id=None, profile_id=None, profile_name=None, local_vars_configuration=None): # noqa: E501
"""CreateEncodingBody - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._video_id = None
self._profile_id = None
self._profile_name = None
self.discriminator = None
self.video_id = video_id
if profile_id is not None:
self.profile_id = profile_id
if profile_name is not None:
self.profile_name = profile_name
@property
def video_id(self):
"""Gets the video_id of this CreateEncodingBody. # noqa: E501
Id of a Video that will be encoded. # noqa: E501
:return: The video_id of this CreateEncodingBody. # noqa: E501
:rtype: str
"""
return self._video_id
@video_id.setter
def video_id(self, video_id):
"""Sets the video_id of this CreateEncodingBody.
Id of a Video that will be encoded. # noqa: E501
:param video_id: The video_id of this CreateEncodingBody. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and video_id is None: # noqa: E501
raise ValueError("Invalid value for `video_id`, must not be `None`") # noqa: E501
self._video_id = video_id
@property
def profile_id(self):
"""Gets the profile_id of this CreateEncodingBody. # noqa: E501
Id of a Profile that will be used for encoding. # noqa: E501
:return: The profile_id of this CreateEncodingBody. # noqa: E501
:rtype: str
"""
return self._profile_id
@profile_id.setter
def profile_id(self, profile_id):
"""Sets the profile_id of this CreateEncodingBody.
Id of a Profile that will be used for encoding. # noqa: E501
:param profile_id: The profile_id of this CreateEncodingBody. # noqa: E501
:type: str
"""
self._profile_id = profile_id
@property
def profile_name(self):
"""Gets the profile_name of this CreateEncodingBody. # noqa: E501
A name of a Profile that will be used for encoding. # noqa: E501
:return: The profile_name of this CreateEncodingBody. # noqa: E501
:rtype: str
"""
return self._profile_name
@profile_name.setter
def profile_name(self, profile_name):
"""Sets the profile_name of this CreateEncodingBody.
A name of a Profile that will be used for encoding. # noqa: E501
:param profile_name: The profile_name of this CreateEncodingBody. # noqa: E501
:type: str
"""
self._profile_name = profile_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateEncodingBody):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CreateEncodingBody):
return True
return self.to_dict() != other.to_dict()
| {
"content_hash": "dca0d32b2d2dcd12fba61343c3b12a80",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 119,
"avg_line_length": 30.084269662921347,
"alnum_prop": 0.576657329598506,
"repo_name": "Telestream/telestream-cloud-python-sdk",
"id": "7f4c4bf41b175f48173b3b7426e8c4bfe546d02d",
"size": "5372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telestream_cloud_flip_sdk/telestream_cloud_flip/models/create_encoding_body.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1339719"
},
{
"name": "Shell",
"bytes": "6712"
}
],
"symlink_target": ""
} |
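A hedged usage sketch for the model above; it assumes the `telestream_cloud_flip` package is installed and importable exactly as in the module header:

from telestream_cloud_flip.models.create_encoding_body import CreateEncodingBody

# video_id is the only required field; the others may remain None.
body = CreateEncodingBody(video_id='vid-123', profile_id='prof-456')
print(body.to_dict())
# {'video_id': 'vid-123', 'profile_id': 'prof-456', 'profile_name': None}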
"""Class for representing a single entity in the Cloud Datastore.
Entities are akin to rows in a relational database,
storing the actual instance of data.
Each entity is officially represented with
a :class:`gclouddatastore.key.Key` class,
however it is possible that you might create
an Entity with only a partial Key
(that is, a Key with a Kind,
and possibly a parent, but without an ID).
Entities in this API act like dictionaries
with extras built in that allow you to
delete or persist the data stored on the entity.
"""
from datetime import datetime
from gclouddatastore import datastore_v1_pb2 as datastore_pb
from gclouddatastore.key import Key
class Entity(dict):
"""
:type dataset: :class:`gclouddatastore.dataset.Dataset`
:param dataset: The dataset in which this entity belongs.
:type kind: string
:param kind: The kind of entity this is, akin to a table name in a
relational database.
Entities are mutable and act like a subclass of a dictionary.
This means you could take an existing entity and change the key
to duplicate the object.
This can be used on its own, however it is likely easier to use
the shortcut methods provided by :class:`gclouddatastore.dataset.Dataset`
such as:
- :func:`gclouddatastore.dataset.Dataset.entity` to create a new entity.
>>> dataset.entity('MyEntityKind')
<Entity[{'kind': 'MyEntityKind'}] {}>
    - :func:`gclouddatastore.dataset.Dataset.get_entity` to retrieve an existing entity.
>>> dataset.get_entity(key)
<Entity[{'kind': 'EntityKind', id: 1234}] {'property': 'value'}>
    You can then set values on the entity just like you would on any other dictionary.
>>> entity['age'] = 20
>>> entity['name'] = 'JJ'
>>> entity
<Entity[{'kind': 'EntityKind', id: 1234}] {'age': 20, 'name': 'JJ'}>
And you can cast an entity to a regular Python dictionary with the `dict` builtin:
>>> dict(entity)
{'age': 20, 'name': 'JJ'}
"""
def __init__(self, dataset=None, kind=None):
if dataset and kind:
self._key = Key(dataset=dataset).kind(kind)
else:
self._key = None
def dataset(self):
"""Get the :class:`gclouddatastore.dataset.Dataset` in which this entity belonds.
.. note::
This is based on the :class:`gclouddatastore.key.Key` set on the entity.
That means that if you have no key set, the dataset might be `None`.
It also means that if you change the key on the entity, this will refer
to that key's dataset.
"""
if self.key():
return self.key().dataset()
def key(self, key=None):
"""Get or set the :class:`gclouddatastore.key.Key` on the current entity.
        :type key: :class:`gclouddatastore.key.Key`
:param key: The key you want to set on the entity.
:returns: Either the current key or the :class:`Entity`.
>>> entity.key(my_other_key) # This returns the original entity.
<Entity[{'kind': 'OtherKeyKind', 'id': 1234}] {'property': 'value'}>
>>> entity.key() # This returns the key.
<Key[{'kind': 'OtherKeyKind', 'id': 1234}]>
"""
if key:
self._key = key
return self
else:
return self._key
def kind(self):
"""Get the kind of the current entity.
.. note::
This relies entirely on
the :class:`gclouddatastore.key.Key`
set on the entity.
That means that we're not storing the kind of the entity at all,
just the properties and a pointer to a Key
which knows its Kind.
"""
if self.key():
return self.key().kind()
@classmethod
def from_key(cls, key):
"""Factory method for creating an entity based on the :class:`gclouddatastore.key.Key`.
:type key: :class:`gclouddatastore.key.Key`
:param key: The key for the entity.
:returns: The :class:`Entity` derived from the :class:`gclouddatastore.key.Key`.
"""
return cls().key(key)
@classmethod
def from_protobuf(cls, pb, dataset=None):
"""Factory method for creating an entity based on a protobuf.
The protobuf should be one returned from the Cloud Datastore Protobuf API.
        :type pb: :class:`gclouddatastore.datastore_v1_pb2.Entity`
        :param pb: The Protobuf representing the entity.
:returns: The :class:`Entity` derived from the :class:`gclouddatastore.datastore_v1_pb2.Entity`.
"""
# This is here to avoid circular imports.
from gclouddatastore import helpers
key = Key.from_protobuf(pb.key, dataset=dataset)
entity = cls.from_key(key)
for property_pb in pb.property:
value = helpers.get_value_from_protobuf(property_pb)
entity[property_pb.name] = value
return entity
def reload(self):
"""Reloads the contents of this entity from the datastore.
This method takes the :class:`gclouddatastore.key.Key`, loads all
properties from the Cloud Datastore, and sets the updated properties on
the current object.
.. warning::
This will override any existing properties if a different value exists
remotely, however it will *not* override any properties that exist
only locally.
"""
# Note that you must have a valid key, otherwise this makes no sense.
entity = self.dataset().get_entities(self.key().to_protobuf())
# TODO(jjg): Raise an error if something dumb happens.
if entity:
self.update(entity)
return self
def save(self):
"""Save the entity in the Cloud Datastore.
:rtype: :class:`gclouddatastore.entity.Entity`
:returns: The entity with a possibly updated Key.
"""
key_pb = self.dataset().connection().save_entity(
dataset_id=self.dataset().id(), key_pb=self.key().to_protobuf(),
properties=dict(self))
# If we are in a transaction and the current entity needs an
# automatically assigned ID, tell the transaction where to put that.
transaction = self.dataset().connection().transaction()
if transaction and self.key().is_partial():
transaction.add_auto_id_entity(self)
if isinstance(key_pb, datastore_pb.Key):
updated_key = Key.from_protobuf(key_pb)
# Update the path (which may have been altered).
key = self.key().path(updated_key.path())
self.key(key)
return self
def delete(self):
"""Delete the entity in the Cloud Datastore.
.. note::
This is based entirely off of the :class:`gclouddatastore.key.Key` set
on the entity. Whatever is stored remotely using the key on the entity
will be deleted.
"""
self.dataset().connection().delete_entity(
dataset_id=self.dataset().id(), key_pb=self.key().to_protobuf())
def __repr__(self):
# TODO: Make sure that this makes sense.
# An entity should have a key all the time (even if it's partial).
if self.key():
return '<Entity%s %s>' % (self.key().path(), super(Entity, self).__repr__())
else:
return '<Entity %s>' % (super(Entity, self).__repr__())
| {
"content_hash": "3c2fa5b0ba7e7ef2bf49a4a45515af42",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 100,
"avg_line_length": 32.17674418604651,
"alnum_prop": 0.6704249783174328,
"repo_name": "jgeewax/gclouddatastore",
"id": "b9543bb75e8b0decebb0e2670feca1b1a49de49d",
"size": "6918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gclouddatastore/entity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "64384"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from restaurant.models import Suggestion, MenuCategory, FoodCategory, FoodItem, FoodMenu, ItemChoice, RestaurantInfo, BusinessHours, AlbumGallery, ImageGallery, LikeFoodMenu
class FoodMenuInline(admin.StackedInline):
model = FoodMenu
extra = 0
class FoodItemAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['name']}),
('Description', {'fields': ['description']}),
]
inlines = [FoodMenuInline]
class SuggestionAdmin(admin.ModelAdmin):
    readonly_fields = ('post_date',)
admin.site.register(Suggestion, SuggestionAdmin)
admin.site.register(FoodItem)
admin.site.register(FoodCategory)
admin.site.register(MenuCategory)
admin.site.register(FoodMenu)
admin.site.register(ItemChoice)
admin.site.register(RestaurantInfo)
admin.site.register(BusinessHours)
admin.site.register(AlbumGallery)
admin.site.register(ImageGallery)
admin.site.register(LikeFoodMenu) | {
"content_hash": "95943b18f76e29a5b878b4c485f3782c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 173,
"avg_line_length": 33.785714285714285,
"alnum_prop": 0.7610993657505285,
"repo_name": "prinnb/cs242-final-project",
"id": "b7ba066a456b993c797634e53b19095381bc2d6a",
"size": "946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restaurant/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7522"
},
{
"name": "JavaScript",
"bytes": "14872"
},
{
"name": "Python",
"bytes": "24365"
}
],
"symlink_target": ""
} |
from cpp11_thread_local import *
t = ThreadLocals()
if t.stval != 11:
raise RuntimeError
if t.tsval != 22:
raise RuntimeError
if t.tscval99 != 99:
raise RuntimeError
cvar.etval = -11
if cvar.etval != -11:
raise RuntimeError
cvar.stval = -22
if cvar.stval != -22:
raise RuntimeError
cvar.tsval = -33
if cvar.tsval != -33:
raise RuntimeError
cvar.etval = -44
if cvar.etval != -44:
raise RuntimeError
cvar.teval = -55
if cvar.teval != -55:
raise RuntimeError
cvar.ectval = -66
if cvar.ectval != -66:
raise RuntimeError
cvar.ecpptval = -66
if cvar.ecpptval != -66:
raise RuntimeError
| {
"content_hash": "c5bd190b5932ed38622745a2be9cc9e9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 32,
"avg_line_length": 16.972972972972972,
"alnum_prop": 0.6656050955414012,
"repo_name": "DEKHTIARJonathan/BilletterieUTC",
"id": "83f2390ecf07dc8cecef433a5ac43750c9ffec94",
"size": "628",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "badgingServer/Install/swigwin-3.0.7/Examples/test-suite/python/cpp11_thread_local_runme.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "505"
},
{
"name": "C",
"bytes": "1489570"
},
{
"name": "C#",
"bytes": "323243"
},
{
"name": "C++",
"bytes": "2646678"
},
{
"name": "CSS",
"bytes": "1309792"
},
{
"name": "Common Lisp",
"bytes": "13780"
},
{
"name": "D",
"bytes": "260374"
},
{
"name": "DIGITAL Command Language",
"bytes": "16078"
},
{
"name": "Forth",
"bytes": "2411"
},
{
"name": "Go",
"bytes": "95670"
},
{
"name": "Groff",
"bytes": "17548"
},
{
"name": "HTML",
"bytes": "8474268"
},
{
"name": "Java",
"bytes": "517584"
},
{
"name": "JavaScript",
"bytes": "1574272"
},
{
"name": "Limbo",
"bytes": "2902"
},
{
"name": "Lua",
"bytes": "103853"
},
{
"name": "M",
"bytes": "58261"
},
{
"name": "Makefile",
"bytes": "193313"
},
{
"name": "Mathematica",
"bytes": "113"
},
{
"name": "Matlab",
"bytes": "49071"
},
{
"name": "Mercury",
"bytes": "4136"
},
{
"name": "OCaml",
"bytes": "25948"
},
{
"name": "Objective-C",
"bytes": "9721"
},
{
"name": "PHP",
"bytes": "336290"
},
{
"name": "Perl",
"bytes": "140021"
},
{
"name": "Perl6",
"bytes": "6403"
},
{
"name": "Pike",
"bytes": "6601"
},
{
"name": "Python",
"bytes": "271706"
},
{
"name": "R",
"bytes": "6053"
},
{
"name": "Ruby",
"bytes": "129514"
},
{
"name": "SQLPL",
"bytes": "10237"
},
{
"name": "Scheme",
"bytes": "81765"
},
{
"name": "Scilab",
"bytes": "84725"
},
{
"name": "Shell",
"bytes": "86284"
},
{
"name": "Standard ML",
"bytes": "2587"
},
{
"name": "Tcl",
"bytes": "38028"
},
{
"name": "Yacc",
"bytes": "211262"
}
],
"symlink_target": ""
} |
"""Python configuration for RatioExpressionInput interaction."""
from __future__ import annotations
from extensions.interactions import base
from typing import List, Optional
MYPY = False
if MYPY: # pragma: no cover
from extensions import domain
class RatioExpressionInput(base.BaseInteraction):
"""Interaction for ratio input."""
name: str = 'Ratio Expression Input'
description: str = 'Allow learners to enter ratios.'
display_mode: str = base.DISPLAY_MODE_INLINE
is_trainable: bool = False
_dependency_ids: List[str] = []
answer_type: str = 'RatioExpression'
instructions: Optional[str] = None
narrow_instructions: Optional[str] = None
needs_summary: bool = False
can_have_solution: bool = True
show_generic_submit_button: bool = True
_customization_arg_specs: List[domain.CustomizationArgSpecsDict] = [{
'name': 'placeholder',
'description': 'Custom placeholder text (optional)',
'schema': {
'type': 'custom',
'obj_type': 'SubtitledUnicode'
},
'default_value': {
'content_id': None,
'unicode_str': ''
}
}, {
'name': 'numberOfTerms',
'description': (
'The number of elements that the answer must have.'
' If set to 0, a ratio of any length will be accepted.'
' The number of elements should not be greater than 10.'),
'schema': {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 0,
}, {
'id': 'is_at_most',
'max_value': 10,
}],
},
'default_value': 0,
}]
_answer_visualization_specs: List[base.AnswerVisualizationSpecsDict] = []
| {
"content_hash": "596d2953d2ca85b9b0a423e2a26e0e1a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 77,
"avg_line_length": 30.677966101694917,
"alnum_prop": 0.5756906077348066,
"repo_name": "oppia/oppia",
"id": "5ef67990f7190e8d603246eb4347c643395392f3",
"size": "2414",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "extensions/interactions/RatioExpressionInput/RatioExpressionInput.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "476480"
},
{
"name": "HTML",
"bytes": "2092923"
},
{
"name": "JavaScript",
"bytes": "1247116"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "17628953"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "TypeScript",
"bytes": "15541372"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.test import TestCase
class BaseTestCase(TestCase):
def test_response(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
user = User.objects.create(username='testuser')
user.set_password('testpassword')
user.save()
self.client.login(username='testuser', password='testpassword')
response = self.client.get('/')
self.assertContains(response, 'logout')
response = self.client.get('/logout', follow=True)
self.assertNotContains(response, 'logout')
| {
"content_hash": "dd9d86c0361c1c881e5d89092bc96e2e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 28.727272727272727,
"alnum_prop": 0.6550632911392406,
"repo_name": "klen/starter-python-django",
"id": "1d51170f49d425cabaff05fbca4a921c1176db0f",
"size": "632",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "starter/templates/python-django/main/tests/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "17412"
}
],
"symlink_target": ""
} |
import logging
import sys
# Borrowed from (modified) https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
# This class is to redirect stdout and stderr to a file
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
def flush(self):
pass
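# Typical use (comment only): call redirectOutputToLog() once at startup;
# afterwards print output is captured into out.log at INFO level and
# traceback text written to stderr at ERROR level.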
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
filename="out.log",
filemode='a'
)
def redirectOutputToLog():
stdout_logger = logging.getLogger('STDOUT')
sl = StreamToLogger(stdout_logger, logging.INFO)
sys.stdout = sl
stderr_logger = logging.getLogger('STDERR')
sl = StreamToLogger(stderr_logger, logging.ERROR)
sys.stderr = sl | {
"content_hash": "7c4491dc9264fd3dc921b15d730cd89e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 119,
"avg_line_length": 27.17948717948718,
"alnum_prop": 0.6650943396226415,
"repo_name": "aosoble/identitybot",
"id": "40fd138ec0766fd1457952ea25046f0f480cea22",
"size": "1060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6593"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import re
from contextlib2 import ExitStack
import numpy as np
import pandas as pd
import sqlalchemy as sa
from toolz import first
from zipline.errors import AssetDBVersionError
from zipline.assets.asset_db_schema import (
ASSET_DB_VERSION,
asset_db_table_names,
asset_router,
equities as equities_table,
equity_symbol_mappings,
futures_contracts as futures_contracts_table,
futures_exchanges,
futures_root_symbols,
metadata,
version_info,
)
from zipline.utils.preprocess import preprocess
from zipline.utils.range import from_tuple, intersecting_ranges
from zipline.utils.sqlite_utils import coerce_string_to_eng
# Define a namedtuple for use with the load_data and _load_data methods
AssetData = namedtuple(
'AssetData', (
'equities',
'equities_mappings',
'futures',
'exchanges',
'root_symbols',
),
)
SQLITE_MAX_VARIABLE_NUMBER = 999
symbol_columns = frozenset({
'symbol',
'company_symbol',
'share_class_symbol',
})
mapping_columns = symbol_columns | {'start_date', 'end_date'}
# Default values for the equities DataFrame
_equities_defaults = {
'symbol': None,
'asset_name': None,
'start_date': 0,
'end_date': 2 ** 62 - 1,
'first_traded': None,
'auto_close_date': None,
# the canonical exchange name, like "NYSE"
'exchange': None,
# optional, something like "New York Stock Exchange"
'exchange_full': None,
}
# Default values for the futures DataFrame
_futures_defaults = {
'symbol': None,
'root_symbol': None,
'asset_name': None,
'start_date': 0,
'end_date': 2 ** 62 - 1,
'first_traded': None,
'exchange': None,
'notice_date': None,
'expiration_date': None,
'auto_close_date': None,
'tick_size': None,
'multiplier': 1,
}
# Default values for the exchanges DataFrame
_exchanges_defaults = {
'timezone': None,
}
# Default values for the root_symbols DataFrame
_root_symbols_defaults = {
'root_symbol_id': None,
'sector': None,
'description': None,
'exchange': None,
}
# Fuzzy symbol delimiters that may break up a company symbol and share class
_delimited_symbol_delimiters_regex = re.compile(r'[./\-_]')
_delimited_symbol_default_triggers = frozenset({np.nan, None, ''})
def split_delimited_symbol(symbol):
"""
    Takes in a symbol that may be delimited and splits it into a company
    symbol and share class symbol. Both parts are upper-cased; bad inputs
    (NaN, None, or the empty string) yield a pair of empty strings.
Parameters
----------
symbol : str
The possibly-delimited symbol to be split
Returns
-------
company_symbol : str
The company part of the symbol.
share_class_symbol : str
The share class part of a symbol.
"""
# return blank strings for any bad fuzzy symbols, like NaN or None
if symbol in _delimited_symbol_default_triggers:
return '', ''
symbol = symbol.upper()
split_list = re.split(
pattern=_delimited_symbol_delimiters_regex,
string=symbol,
maxsplit=1,
)
# Break the list up in to its two components, the company symbol and the
# share class symbol
company_symbol = split_list[0]
if len(split_list) > 1:
share_class_symbol = split_list[1]
else:
share_class_symbol = ''
return company_symbol, share_class_symbol
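# Illustrative behaviour (hypothetical tickers, kept as comments so that
# importing this module stays side-effect free):
#   split_delimited_symbol('BRK.A') -> ('BRK', 'A')
#   split_delimited_symbol('AAPL')  -> ('AAPL', '')
#   split_delimited_symbol(None)    -> ('', '')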
def _generate_output_dataframe(data_subset, defaults):
"""
Generates an output dataframe from the given subset of user-provided
data, the given column names, and the given default values.
Parameters
----------
data_subset : DataFrame
A DataFrame, usually from an AssetData object,
that contains the user's input metadata for the asset type being
processed
defaults : dict
A dict where the keys are the names of the columns of the desired
output DataFrame and the values are the default values to insert in the
DataFrame if no user data is provided
Returns
-------
DataFrame
A DataFrame containing all user-provided metadata, and default values
wherever user-provided metadata was missing
"""
# The columns provided.
cols = set(data_subset.columns)
desired_cols = set(defaults)
# Drop columns with unrecognised headers.
data_subset.drop(cols - desired_cols,
axis=1,
inplace=True)
# Get those columns which we need but
# for which no data has been supplied.
for col in desired_cols - cols:
# write the default value for any missing columns
data_subset[col] = defaults[col]
return data_subset
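# Sketch of the behaviour above (comment only): given defaults
# {'a': 0, 'b': 1} and a frame with columns ['a', 'junk'], the 'junk'
# column is dropped and a constant column 'b' == 1 is added in place.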
def _check_asset_group(group):
row = group.sort_values('end_date').iloc[-1]
row.start_date = group.start_date.min()
row.end_date = group.end_date.max()
row.drop(list(symbol_columns), inplace=True)
return row
def _format_range(r):
return (
str(pd.Timestamp(r.start, unit='ns')),
str(pd.Timestamp(r.stop, unit='ns')),
)
def _split_symbol_mappings(df):
"""Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
        The dataframe of just symbol: sid mappings. The index will be
        the sid; the columns are the symbol columns (symbol,
        company_symbol, share_class_symbol) plus start_date and end_date.
"""
mappings = df[list(mapping_columns)]
    ambiguous = {}
for symbol in mappings.symbol.unique():
persymbol = mappings[mappings.symbol == symbol]
intersections = list(intersecting_ranges(map(
from_tuple,
zip(persymbol.start_date, persymbol.end_date),
)))
if intersections:
            ambiguous[symbol] = (
intersections,
persymbol[['start_date', 'end_date']].astype('datetime64[ns]'),
)
    if ambiguous:
raise ValueError(
'Ambiguous ownership for %d symbol%s, multiple assets held the'
' following symbols:\n%s' % (
                len(ambiguous),
                '' if len(ambiguous) == 1 else 's',
'\n'.join(
'%s:\n intersections: %s\n %s' % (
symbol,
tuple(map(_format_range, intersections)),
# indent the dataframe string
'\n '.join(str(df).splitlines()),
)
for symbol, (intersections, df) in sorted(
                        ambiguous.items(),
key=first,
),
),
)
)
return (
df.groupby(level=0).apply(_check_asset_group),
df[list(mapping_columns)],
)
def _dt_to_epoch_s(dt_series):
"""Convert a timeseries into an Int64Index of seconds since the epoch.
Parameters
----------
dt_series : pd.Series
The timeseries to convert.
Returns
-------
idx : pd.Int64Index
The index converted to seconds since the epoch.
"""
index = pd.to_datetime(dt_series.values)
if index.tzinfo is None:
index = index.tz_localize('UTC')
else:
index = index.tz_convert('UTC')
    return (index.view(np.int64) // int(1e9)).astype(int)  # nanoseconds -> seconds
def _dt_to_epoch_ns(dt_series):
"""Convert a timeseries into an Int64Index of nanoseconds since the epoch.
Parameters
----------
dt_series : pd.Series
The timeseries to convert.
Returns
-------
idx : pd.Int64Index
The index converted to nanoseconds since the epoch.
"""
index = pd.to_datetime(dt_series.values)
if index.tzinfo is None:
index = index.tz_localize('UTC')
else:
index = index.tz_convert('UTC')
return index.view(np.int64)
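# Worked example for the two conversions above (values assumed, comment
# only): for a Series holding the UTC timestamp 1970-01-01 00:00:01,
# _dt_to_epoch_s returns array([1]) and _dt_to_epoch_ns returns
# array([1000000000]).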
def check_version_info(conn, version_table, expected_version):
"""
Checks for a version value in the version table.
Parameters
----------
conn : sa.Connection
The connection to use to perform the check.
version_table : sa.Table
The version table of the asset database
expected_version : int
The expected version of the asset database
Raises
------
AssetDBVersionError
If the version is in the table and not equal to ASSET_DB_VERSION.
"""
# Read the version out of the table
version_from_table = conn.execute(
sa.select((version_table.c.version,)),
).scalar()
# A db without a version is considered v0
if version_from_table is None:
version_from_table = 0
# Raise an error if the versions do not match
    if version_from_table != expected_version:
raise AssetDBVersionError(db_version=version_from_table,
expected_version=expected_version)
def write_version_info(conn, version_table, version_value):
"""
    Inserts the version value into the version table.
Parameters
----------
conn : sa.Connection
The connection to use to execute the insert.
version_table : sa.Table
The version table of the asset database
version_value : int
The version to write in to the database
"""
conn.execute(sa.insert(version_table, values={'version': version_value}))
class _empty(object):
columns = ()
class AssetDBWriter(object):
"""Class used to write data to an assets db.
Parameters
----------
engine : Engine or str
An SQLAlchemy engine or path to a SQL database.
"""
DEFAULT_CHUNK_SIZE = SQLITE_MAX_VARIABLE_NUMBER
@preprocess(engine=coerce_string_to_eng)
def __init__(self, engine):
self.engine = engine
def write(self,
equities=None,
futures=None,
exchanges=None,
root_symbols=None,
chunk_size=DEFAULT_CHUNK_SIZE):
"""Write asset metadata to a sqlite database.
Parameters
----------
equities : pd.DataFrame, optional
The equity metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this equity.
asset_name : str
The full name for this asset.
start_date : datetime
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
auto_close_date : datetime, optional
The date on which to close any positions in this asset.
exchange : str, optional
The exchange where this asset is traded.
The index of this dataframe should contain the sids.
futures : pd.Dataframe, optional
The future contract metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this futures contract.
root_symbol : str
The root symbol, or the symbol with the expiration stripped
out.
asset_name : str
The full name for this asset.
start_date : datetime, optional
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
exchange : str, optional
The exchange where this asset is traded.
notice_date : datetime
The date when the owner of the contract may be forced
to take physical delivery of the contract's asset.
expiration_date : datetime
The date when the contract expires.
auto_close_date : datetime
The date when the broker will automatically close any
positions in this contract.
tick_size : float
The minimum price movement of the contract.
multiplier: float
The amount of the underlying asset represented by this
contract.
exchanges : pd.Dataframe, optional
The exchanges where assets can be traded. The columns of this
dataframe are:
exchange : str
The name of the exchange.
timezone : str
The timezone of the exchange.
root_symbols : pd.Dataframe, optional
The root symbols for the futures contracts. The columns for this
dataframe are:
root_symbol : str
The root symbol name.
root_symbol_id : int
The unique id for this root symbol.
sector : string, optional
The sector of this root symbol.
description : string, optional
A short description of this root symbol.
exchange : str
The exchange where this root symbol is traded.
chunk_size : int, optional
The amount of rows to write to the SQLite table at once.
This defaults to the default number of bind params in sqlite.
If you have compiled sqlite3 with more bind or less params you may
want to pass that value here.
See Also
--------
zipline.assets.asset_finder
"""
with self.engine.begin() as conn:
# Create SQL tables if they do not exist.
self.init_db(conn)
# Get the data to add to SQL.
data = self._load_data(
equities if equities is not None else pd.DataFrame(),
futures if futures is not None else pd.DataFrame(),
exchanges if exchanges is not None else pd.DataFrame(),
root_symbols if root_symbols is not None else pd.DataFrame(),
)
# Write the data to SQL.
self._write_df_to_table(
futures_exchanges,
data.exchanges,
conn,
chunk_size,
)
self._write_df_to_table(
futures_root_symbols,
data.root_symbols,
conn,
chunk_size,
)
self._write_assets(
'future',
data.futures,
conn,
chunk_size,
)
self._write_assets(
'equity',
data.equities,
conn,
chunk_size,
mapping_data=data.equities_mappings,
)
def _write_df_to_table(self, tbl, df, txn, chunk_size, idx_label=None):
df.to_sql(
tbl.name,
self.engine,
index_label=(
idx_label
if idx_label is not None else
first(tbl.primary_key.columns).name
),
if_exists='append',
chunksize=chunk_size,
)
def _write_assets(self,
asset_type,
assets,
txn,
chunk_size,
mapping_data=None):
if asset_type == 'future':
tbl = futures_contracts_table
if mapping_data is not None:
raise TypeError('no mapping data expected for futures')
self._write_df_to_table(tbl, assets, txn, chunk_size)
elif asset_type == 'equity':
tbl = equities_table
self._write_df_to_table(tbl, assets, txn, chunk_size)
if mapping_data is None:
raise TypeError('mapping data required for equities')
# write the symbol mapping data.
self._write_df_to_table(
equity_symbol_mappings,
mapping_data,
txn,
chunk_size,
idx_label='sid',
)
else:
raise ValueError(
"asset_type must be in {'future', 'equity'}, got: %s" %
asset_type,
)
pd.DataFrame({
asset_router.c.sid.name: assets.index.values,
asset_router.c.asset_type.name: asset_type,
}).to_sql(
asset_router.name,
self.engine,
if_exists='append',
index=False,
chunksize=chunk_size
)
def _all_tables_present(self, txn):
"""
Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False.
"""
conn = txn.connect()
for table_name in asset_db_table_names:
if txn.dialect.has_table(conn, table_name):
return True
return False
def init_db(self, txn=None):
"""Connect to database and create tables.
Parameters
----------
txn : sa.engine.Connection, optional
The transaction to execute in. If this is not provided, a new
transaction will be started with the engine provided.
Returns
-------
metadata : sa.MetaData
The metadata that describes the new assets db.
"""
with ExitStack() as stack:
if txn is None:
txn = stack.enter_context(self.engine.begin())
tables_already_exist = self._all_tables_present(txn)
# Create the SQL tables if they do not already exist.
metadata.create_all(txn, checkfirst=True)
if tables_already_exist:
check_version_info(txn, version_info, ASSET_DB_VERSION)
else:
write_version_info(txn, version_info, ASSET_DB_VERSION)
def _normalize_equities(self, equities):
# HACK: If 'company_name' is provided, map it to asset_name
if ('company_name' in equities.columns and
'asset_name' not in equities.columns):
equities['asset_name'] = equities['company_name']
# remap 'file_name' to 'symbol' if provided
if 'file_name' in equities.columns:
equities['symbol'] = equities['file_name']
equities_output = _generate_output_dataframe(
data_subset=equities,
defaults=_equities_defaults,
)
# Split symbols to company_symbols and share_class_symbols
tuple_series = equities_output['symbol'].apply(split_delimited_symbol)
split_symbols = pd.DataFrame(
tuple_series.tolist(),
columns=['company_symbol', 'share_class_symbol'],
index=tuple_series.index
)
equities_output = pd.concat((equities_output, split_symbols), axis=1)
# Upper-case all symbol data
for col in symbol_columns:
equities_output[col] = equities_output[col].str.upper()
        # Convert date columns to UNIX Epoch integers (seconds)
for col in ('start_date',
'end_date',
'first_traded',
'auto_close_date'):
equities_output[col] = _dt_to_epoch_s(equities_output[col])
return _split_symbol_mappings(equities_output)
def _normalize_futures(self, futures):
futures_output = _generate_output_dataframe(
data_subset=futures,
defaults=_futures_defaults,
)
for col in ('symbol', 'root_symbol'):
futures_output[col] = futures_output[col].str.upper()
for col in ('start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date'):
futures_output[col] = _dt_to_epoch_s(futures_output[col])
return futures_output
def _load_data(self, equities, futures, exchanges, root_symbols):
"""
Returns a standard set of pandas.DataFrames:
equities, futures, exchanges, root_symbols
"""
# Check whether identifier columns have been provided.
# If they have, set the index to this column.
        # If not, assume the index already contains the identifier information.
for df, id_col in [(equities, 'sid'),
(futures, 'sid'),
(exchanges, 'exchange'),
(root_symbols, 'root_symbol')]:
if id_col in df.columns:
df.set_index(id_col, inplace=True)
equities_output, equities_mappings = self._normalize_equities(equities)
futures_output = self._normalize_futures(futures)
exchanges_output = _generate_output_dataframe(
data_subset=exchanges,
defaults=_exchanges_defaults,
)
root_symbols_output = _generate_output_dataframe(
data_subset=root_symbols,
defaults=_root_symbols_defaults,
)
return AssetData(
equities=equities_output,
equities_mappings=equities_mappings,
futures=futures_output,
exchanges=exchanges_output,
root_symbols=root_symbols_output,
)
| {
"content_hash": "7b30aa9635a302284a0ea644cbf618a9",
"timestamp": "",
"source": "github",
"line_count": 684,
"max_line_length": 79,
"avg_line_length": 31.970760233918128,
"alnum_prop": 0.5672672398024511,
"repo_name": "bernoullio/toolbox",
"id": "cbeb7c464caa1a3f263e9d3085582738b916586e",
"size": "22450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forex_toolbox/zipline_extension/assets/asset_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153307"
}
],
"symlink_target": ""
} |
import json
from tempest.common import rest_client
class VersionV3ClientJSON(rest_client.RestClient):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(VersionV3ClientJSON, self).__init__(config, username,
password, auth_url,
tenant_name)
self.service = self.config.compute.catalog_v3_type
def get_version(self):
resp, body = self.get('')
body = json.loads(body)
return resp, body['version']
| {
"content_hash": "92ebe5e00e785a910051f1b8f238f18c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 79,
"avg_line_length": 33.94117647058823,
"alnum_prop": 0.5597920277296361,
"repo_name": "BeenzSyed/tempest",
"id": "1773af58a731f8f43b29555cc4f6c310c70107b3",
"size": "1209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/services/compute/v3/json/version_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2613370"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
import os
try:
import koji as koji
except ImportError:
import inspect
import sys
# Find our mocked koji module
import tests.koji as koji
mock_koji_path = os.path.dirname(inspect.getfile(koji.ClientSession))
if mock_koji_path not in sys.path:
sys.path.append(os.path.dirname(mock_koji_path))
# Now load it properly, the same way the plugin will
del koji
import koji as koji
from atomic_reactor.plugins.pre_koji import KojiPlugin
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PreBuildPluginsRunner
from atomic_reactor.util import ImageName
from flexmock import flexmock
import pytest
from tests.constants import SOURCE, MOCK
if MOCK:
from tests.docker_mock import mock_docker
class X(object):
pass
KOJI_TARGET = "target"
KOJI_TARGET_BROKEN_TAG = "target-broken"
KOJI_TARGET_BROKEN_REPO = "target-broken-repo"
KOJI_TAG = "tag"
KOJI_BROKEN_TAG = "tag-broken"
KOJI_BROKEN_REPO = "tag-broken-repo"
GET_TARGET_RESPONSE = {"build_tag_name": KOJI_TAG}
BROKEN_TAG_RESPONSE = {"build_tag_name": KOJI_BROKEN_TAG}
BROKEN_REPO_RESPONSE = {"build_tag_name": KOJI_BROKEN_REPO}
TAG_ID = "1"
BROKEN_REPO_TAG_ID = "2"
GET_TAG_RESPONSE = {"id": TAG_ID, "name": KOJI_TAG}
REPO_ID = "2"
BROKEN_REPO_ID = "3"
REPO_BROKEN_TAG_RESPONSE = {"id": BROKEN_REPO_ID, "name": KOJI_BROKEN_REPO}
GET_REPO_RESPONSE = {"id": "2"}
ROOT = "http://example.com"
# ClientSession is xmlrpc instance, we need to mock it explicitly
class MockedClientSession(object):
def __init__(self, hub, opts=None):
pass
def getBuildTarget(self, target):
if target == KOJI_TARGET_BROKEN_TAG:
return BROKEN_TAG_RESPONSE
if target == KOJI_TARGET_BROKEN_REPO:
return BROKEN_REPO_RESPONSE
return GET_TARGET_RESPONSE
def getTag(self, tag):
if tag == KOJI_BROKEN_TAG:
return None
if tag == KOJI_BROKEN_REPO:
return REPO_BROKEN_TAG_RESPONSE
return GET_TAG_RESPONSE
def getRepo(self, repo):
if repo == BROKEN_REPO_ID:
return None
return GET_REPO_RESPONSE
def ssl_login(self, cert=None, ca=None, serverca=None, proxyuser=None):
self.ca_path = ca
self.cert_path = cert
self.serverca_path = serverca
return True
class MockedPathInfo(object):
def __init__(self, topdir=None):
self.topdir = topdir
def repo(self, repo_id, name):
return "{0}/repos/{1}/{2}".format(self.topdir, name, repo_id)
def prepare():
if MOCK:
mock_docker()
tasker = DockerTasker()
workflow = DockerBuildWorkflow(SOURCE, "test-image")
setattr(workflow, 'builder', X())
setattr(workflow.builder, 'image_id', "asd123")
setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='21'))
setattr(workflow.builder, 'source', X())
setattr(workflow.builder.source, 'dockerfile_path', None)
setattr(workflow.builder.source, 'path', None)
session = MockedClientSession(hub='', opts=None)
workflow.koji_session = session
flexmock(koji,
ClientSession=session,
PathInfo=MockedPathInfo)
return tasker, workflow
class TestKoji(object):
@pytest.mark.parametrize(('target', 'expect_success'), [
(KOJI_TARGET, True),
(KOJI_TARGET_BROKEN_TAG, False),
(KOJI_TARGET_BROKEN_REPO, False)])
@pytest.mark.parametrize(('root',
'koji_ssl_certs',
'expected_string',
'expected_file',
'proxy'), [
# Plain http repo
('http://example.com',
False,
None,
None,
None),
# Plain http repo with proxy
('http://example.com',
False,
None,
None,
'http://proxy.example.com'),
# https with koji_ssl_certs
# ('https://example.com',
# True,
# 'sslcacert=',
# '/etc/yum.repos.d/example.com.cert'),
# https with no cert available
('https://nosuchwebsiteforsure.com',
False,
'sslverify=0',
None,
None),
# https with no cert available
('https://nosuchwebsiteforsure.com',
False,
'sslverify=0',
None,
'http://proxy.example.com'),
# https with cert available
# ('https://example.com',
# False,
# 'sslcacert=/etc/yum.repos.d/example.com.cert',
# '/etc/yum.repos.d/example.com.cert'),
# https with a cert for authentication
('https://nosuchwebsiteforsure.com',
True,
'sslverify=0',
None,
'http://proxy.example.com'),
])
def test_koji_plugin(self,
target, expect_success,
tmpdir, root, koji_ssl_certs,
expected_string, expected_file, proxy):
tasker, workflow = prepare()
args = {
'target': target,
'hub': '',
'root': root,
'proxy': proxy,
}
if koji_ssl_certs:
args['koji_ssl_certs_dir'] = str(tmpdir)
tmpdir.join('cert').write('cert')
tmpdir.join('serverca').write('serverca')
runner = PreBuildPluginsRunner(tasker, workflow, [{
'name': KojiPlugin.key,
'args': args,
}])
runner.run()
if not expect_success:
return
if koji_ssl_certs:
for file_path, expected in [(workflow.koji_session.cert_path, 'cert'),
(workflow.koji_session.serverca_path, 'serverca')]:
assert os.path.isfile(file_path)
with open(file_path, 'r') as fd:
assert fd.read() == expected
repofile = '/etc/yum.repos.d/target.repo'
assert repofile in workflow.files
content = workflow.files[repofile]
assert content.startswith("[atomic-reactor-koji-plugin-target]\n")
assert "gpgcheck=0\n" in content
assert "enabled=1\n" in content
assert "name=atomic-reactor-koji-plugin-target\n" in content
assert "baseurl=%s/repos/tag/2/$basearch\n" % root in content
if proxy:
assert "proxy=%s" % proxy in content
if expected_string:
assert expected_string in content
if expected_file:
assert expected_file in workflow.files
| {
"content_hash": "86ef7644ee2a761440fd14d1ca33039d",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 91,
"avg_line_length": 29.256410256410255,
"alnum_prop": 0.5896874087058136,
"repo_name": "vrutkovs/atomic-reactor",
"id": "a3fd406264d3d1c54108ef31513df04ab533129e",
"size": "6846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/test_koji.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1413753"
},
{
"name": "Shell",
"bytes": "6571"
}
],
"symlink_target": ""
} |
import warnings
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ImproperlyConfigured
default_app_config = 'leonardo.module.search.SearchConfig'
class Default(object):
optgroup = 'Search'
@property
def apps(self):
INSTALLED_APPS = []
try:
import whoosh
except Exception as e:
try:
import haystack
except ImportError as e:
warnings.warn(
'Haystack search engine is disabled because: {}'.format(e))
except ImproperlyConfigured as e:
warnings.warn(
'Haystack search engine is disabled because: {}'.format(e))
else:
INSTALLED_APPS += ['haystack']
else:
INSTALLED_APPS += ['whoosh', 'haystack']
return INSTALLED_APPS + ['leonardo.module.search']
plugins = [
('leonardo.module.search.apps.search', _('Search'))
]
class SearchConfig(AppConfig, Default):
name = 'leonardo.module.search'
verbose_name = "Search Module"
default = Default()
| {
"content_hash": "b5af43a43a73020ec3cc4e1c0f4d7227",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 79,
"avg_line_length": 26.568181818181817,
"alnum_prop": 0.5936698032506416,
"repo_name": "django-leonardo/django-leonardo",
"id": "c544faa6fec38a658d8b4cb671a44f961dbf533a",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leonardo/module/search/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "33187"
},
{
"name": "Dockerfile",
"bytes": "835"
},
{
"name": "HTML",
"bytes": "323851"
},
{
"name": "JavaScript",
"bytes": "264957"
},
{
"name": "Makefile",
"bytes": "1390"
},
{
"name": "Python",
"bytes": "705902"
},
{
"name": "SCSS",
"bytes": "68482"
},
{
"name": "Shell",
"bytes": "5569"
}
],
"symlink_target": ""
} |
import pygame, sys
from screen import screen
from constants import *
# font_path = pygame.font.match_font('Arial')
font_path = pygame.font.get_default_font()
def draw_text(text, center, color=white, size=30, bold=False, background=None,
left=False):
    font = pygame.font.Font(font_path, size)
    font.set_bold(bold)
if background:
surface = font.render(text, True, color, background)
else:
surface = font.render(text, True, color)
rect = surface.get_rect()
rect.center = tuple(center)
if left:
rect.left = center[0]
screen.blit(surface, rect)
return rect
def draw_image(img, center):
rect = img.get_rect()
rect.center = tuple(center)
screen.blit(img, rect)
return rect
def draw_focus_screen():
p = screen.get_rect().center
draw_text('+', p, size=100)
def focus_slide():
screen.fill(background_color)
draw_focus_screen()
pygame.display.flip()
def simple_slide(text, size=100):
screen.fill(background_color)
p = screen.get_rect().center
draw_text(text, p, size=size)
pygame.display.flip()
def text_slide(text, size=30):
screen.fill(background_color)
lines = text.split('\n')
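    # Render a throwaway line in the background color purely to measure the
    # font's line height for this size; it is invisible against the fill.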
rect = draw_text('test', (SCREEN_WIDTH/2, SCREEN_HEIGHT/2),
size=size, color=background_color)
height = rect.height + 6
y = SCREEN_HEIGHT/2 - (len(lines)-1)*(height/2)
for line in lines:
draw_text(line.strip(), (SCREEN_WIDTH/2, y), size=size)
y += height
pygame.display.flip()
def image_slide(img):
screen.fill(background_color)
rect = draw_image(img, (SCREEN_WIDTH/2, SCREEN_HEIGHT/2))
pygame.display.update(rect)
def recognize_slide(word, size=100):
screen.fill(background_color)
p = screen.get_rect().center
draw_text(word, p, size=size)
draw_text("1=unknown", (200, SCREEN_HEIGHT-100), size=50)
draw_text("4=recognized", (SCREEN_WIDTH-250, SCREEN_HEIGHT-100), size=50)
pygame.display.flip()
| {
"content_hash": "891c6ac050d87f920fd2bddee51ad3c5",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 28.04225352112676,
"alnum_prop": 0.6443997990959317,
"repo_name": "lambdaloop/CIT-biosignals",
"id": "66154479b67bce8386585a7d016d8f7957f230a9",
"size": "1991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygame/drawstuff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "868"
},
{
"name": "Python",
"bytes": "20142"
}
],
"symlink_target": ""
} |
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TeamBuilder'
copyright = u"2015, Samuele"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tbdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'tb.tex',
u'TeamBuilder Documentation',
u"Samuele", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tb', u'TeamBuilder Documentation',
[u"Samuele"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tb', u'TeamBuilder Documentation',
u"Samuele", 'TeamBuilder',
'Teambuilder', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| {
"content_hash": "4fcb4eac84ad45a11841fe72a122c4eb",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 80,
"avg_line_length": 31.70258620689655,
"alnum_prop": 0.6924541128484024,
"repo_name": "0cN/tb",
"id": "c58026ba068afc01f2e87c565c881b637c59d391",
"size": "7749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1768"
},
{
"name": "HTML",
"bytes": "20101"
},
{
"name": "JavaScript",
"bytes": "3150"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "37788"
},
{
"name": "Shell",
"bytes": "4542"
}
],
"symlink_target": ""
} |
import unittest
from sql import Asc, Desc, Column, Table
class TestOrder(unittest.TestCase):
column = Column(Table('t'), 'c')
def test_asc(self):
self.assertEqual(str(Asc(self.column)), '"c" ASC')
def test_desc(self):
self.assertEqual(str(Desc(self.column)), '"c" DESC')
| {
"content_hash": "3244ceba486d1e73ee7c34c350b74dea",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 60,
"avg_line_length": 23.384615384615383,
"alnum_prop": 0.6414473684210527,
"repo_name": "shnergle/ShnergleServer",
"id": "6d859cde8fa4ec264d39d022af0e9545a4fec2f0",
"size": "1914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "site-packages/sql/tests/test_order.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "774901"
}
],
"symlink_target": ""
} |
'''
Test framework for insights modules
-----------------------------------
:copyright (c) 2014 Xavier Bruhiere
:license: Apache 2.0, see LICENSE for more details.
'''
import abc
import unittest
from nose.tools import ok_, nottest
import pytz
import pandas as pd
import datetime as dt
import random
import yaml
import zipline.protocol
from dna.test_utils import setup_logger, teardown_logger
def _generate_sid_data(sid):
return {
'source_id': __name__ + '-1234',
'type': 4,
'sid': sid,
'dt': pd.tslib.Timestamp('2012/06/05', tz=pytz.utc),
'datetime': pd.tslib.Timestamp('2012/06/05', tz=pytz.utc),
'price': random.random() * 10,
'volume': random.random() * 1000
}
def generate_fake_returns(sids):
dates = pd.date_range('2012/01/01', '2012/06/01', tz=pytz.utc)
return pd.DataFrame(
{sid: [random.random() * 100 for _ in range(len(dates))]
for sid in sids},
index=dates
)
def generate_fake_portfolio(sids):
pf = zipline.protocol.Portfolio()
for sid in sids:
pf.positions[sid] = zipline.protocol.Position(sid)
pf.positions[sid].amount = random.randint(1, 100)
cost = random.randrange(1, 200)
pf.positions[sid].cost_basis = cost
pf.positions[sid].last_sale_price = random.randrange(
int(cost), int(cost + 1000 * random.random())
)
return pf
# pylint: disable=R0921
class FactoryAlgorithmTestCase(unittest.TestCase):
'''
New algorithm tests inherit from this factory class. The main idea is to
    provide a common ground to validate Intuition-compliant algorithms, and
    to ease test writing.
'''
__metaclass__ = abc.ABCMeta
def setUp(self):
setup_logger(self)
self.default_identity = 'johndoe'
self.event_data = zipline.protocol.BarData()
for sid in ['goog', 'aapl', 'msft']:
self.event_data[sid] = zipline.protocol.SIDData(
_generate_sid_data(sid)
)
def tearDown(self):
teardown_logger(self)
def _check_yaml_doc(self, Algo):
doc = yaml.load(Algo.__doc__)
ok_(isinstance(doc, dict))
self.assertIn('doc', doc)
ok_(isinstance(doc.get('parameters', {}), dict))
def _check_signal_sid(self, sid_data):
self.assertIsInstance(sid_data, zipline.protocol.SIDData)
for info in _generate_sid_data('').keys():
self.assertIn(info, sid_data)
def _check_event_output(self, signals):
if signals is not None:
self.assertIsInstance(signals, dict)
for signal_type in ['buy', 'sell']:
for sid in signals.get(signal_type, {}):
self._check_signal_sid(signals[signal_type][sid])
# pylint: disable=R0921
class FactoryManagerTestCase(unittest.TestCase):
'''
    This abstract factory class targets the same goals as
    FactoryAlgorithmTestCase, focusing on the Intuition manager building
    block.
'''
__metaclass__ = abc.ABCMeta
def setUp(self):
setup_logger(self)
        self.some_date = dt.datetime(2014, 4, 10)
self.buy_signal = {'goog': 34}
self.test_sids = ['goog', 'aapl', 'msft']
self.test_pf = generate_fake_portfolio(self.test_sids)
def tearDown(self):
teardown_logger(self)
@nottest
def _check_initialization(self, manager):
ok_(not manager.date)
ok_(not manager.portfolio)
ok_(not manager.perfs)
ok_(hasattr(manager, 'log'))
@nottest
def _check_optimize_return(self, alloc, e_ret, e_risk):
ok_(isinstance(alloc, dict))
ok_(e_ret >= 0 and e_ret <= 1)
ok_(e_risk >= 0 and e_risk <= 1)
| {
"content_hash": "a47243d45881a6fb7874517c17384f14",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 76,
"avg_line_length": 29.606299212598426,
"alnum_prop": 0.6058510638297873,
"repo_name": "intuition-io/insights",
"id": "c78ddc238f776fcfa4919c0c3402dfa9e1f3e7d5",
"size": "3802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/test_framework.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "117006"
},
{
"name": "R",
"bytes": "10202"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from FileManager.forms import CloudFileForm
from FileManager.models import CloudFile, Genre
import json
def index(request):
return render(request, 'index-fm.html')
def show_genres(request):
return render(request, "genres.html",
{'nodes': Genre.objects.all()})
def create_dir(request, pid):
dir = CloudFile(name='directory',
file='CloudFiles/directory',
md5sum='unknown',
size=0,
f_type='text/directory',
parent=pid)
try:
dir.save()
except Exception as e:
return HttpResponse(e, status=500)
return HttpResponse(b'OK')
def upload(request):
if request.method == 'POST':
form = CloudFileForm(request.POST, request.FILES)
print(form.errors)
if form.is_valid():
form.save()
return HttpResponse(json.dumps({'state': 'OK'}), 'application/json')
# return render(request, 'index-fm.html', {'form': form})
else:
form = CloudFileForm()
return HttpResponse(json.dumps({'state': 'NULL'}), 'application/json')
# return render(request, 'index-fm.html', {'form': form})
def delete(request, fid):
    if request.method == 'DELETE':
        f = CloudFile.objects.filter(pk=fid)
        if len(f) == 1:
            f[0].delete()
            return HttpResponse(json.dumps({'state': 'OK'}), 'application/json')
        else:
            return HttpResponse(json.dumps({'state': 'File Not Found'}), status=404)
    # Reject non-DELETE requests explicitly rather than returning None.
    return HttpResponse(status=405)
| {
"content_hash": "9405e37ee3a053b0f37f63a1c4cc29b5",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 30.735849056603772,
"alnum_prop": 0.5991405770411296,
"repo_name": "Yuvv/LearnTestDemoTempMini",
"id": "ade58d39e6810c96ab511163563d896bfef361cb",
"size": "1629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py-django/FileUpload/FileManager/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "154280"
},
{
"name": "C++",
"bytes": "15779"
},
{
"name": "CSS",
"bytes": "13766"
},
{
"name": "HTML",
"bytes": "22273"
},
{
"name": "JavaScript",
"bytes": "17383"
},
{
"name": "PLpgSQL",
"bytes": "1493"
},
{
"name": "Python",
"bytes": "198537"
},
{
"name": "SQLPL",
"bytes": "1960"
},
{
"name": "Shell",
"bytes": "5487"
}
],
"symlink_target": ""
} |
'''
This script is used for generating data for Deopen training.
Usage:
    python Gen_data.py -pos <positive_bed_file> -neg <negative_bed_file> -genome <genome_file> -l 1000 -out <outputfile>
'''
import numpy as np
from pyfasta import Fasta
import hickle as hkl
import argparse
#transfrom a sequence to one-hot encoding matrix
def seq_to_mat(seq):
encoding_matrix = {'a':0, 'A':0, 'c':1, 'C':1, 'g':2, 'G':2, 't':3, 'T':3, 'n':4, 'N':4}
mat = np.zeros((len(seq),5))
for i in range(len(seq)):
mat[i,encoding_matrix[seq[i]]] = 1
mat = mat[:,:4]
return mat
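# For example (comment only): seq_to_mat('AC') yields the 2x4 matrix
# [[1, 0, 0, 0], [0, 1, 0, 0]] -- one row per base, columns A/C/G/T,
# with the fifth ('N') column dropped by the final slice.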
#transform a sequence to K-mer vector (default: K=6)
def seq_to_kspec(seq, K=6):
encoding_matrix = {'a':0, 'A':0, 'c':1, 'C':1, 'g':2, 'G':2, 't':3, 'T':3, 'n':0, 'N':0}
kspec_vec = np.zeros((4**K,1))
for i in range(len(seq)-K+1):
sub_seq = seq[i:(i+K)]
index = 0
for j in range(K):
index += encoding_matrix[sub_seq[j]]*(4**(K-j-1))
kspec_vec[index] += 1
return kspec_vec
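# Worked index example for K=2 (comment only): the 2-mer 'CA' maps to
# index 1 * 4**1 + 0 * 4**0 = 4, so kspec_vec[4] counts its occurrences.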
#assemble all the features into a dictionary
def get_all_feats(spot,genome,label):
ret = {}
ret['spot'] = spot
ret['seq'] = genome[spot[0]][spot[1]:spot[2]]
ret['mat'] = seq_to_mat(ret['seq'])
ret['kmer'] = seq_to_kspec(ret['seq'])
ret['y'] = label
return ret
#save the preprocessed data in hkl format
def save_dataset(origin_dataset,save_dir):
dataset = {}
for key in origin_dataset[0].keys():
dataset[key] = [item[key] for item in origin_dataset]
dataset['seq'] = [item.encode('ascii','ignore') for item in dataset['seq']]
for key in origin_dataset[0].keys():
dataset[key] = np.array(dataset[key])
hkl.dump(dataset,save_dir, mode='w', compression='gzip')
print 'Training data generation is finished!'
#generate dataset
def generate_dataset(positive_file,negative_file,genome_file,sample_length = 1000):
dataset=[]
genome = Fasta(genome_file)
with open(positive_file,'r') as f_pos:
for line in f_pos:
chrom = line.rstrip('\n').split('\t')[0]
start = int(line.rstrip('\n').split('\t')[1])
end = int(line.rstrip('\n').split('\t')[2])
mid = (start+end)/2
dataset.append(get_all_feats([chrom,mid-sample_length/2,mid+sample_length/2],genome,1))
f_pos.close()
with open(negative_file,'r') as f_neg:
for line in f_neg:
chrom = line.rstrip('\n').split('\t')[0]
start = int(line.rstrip('\n').split('\t')[1])
end = int(line.rstrip('\n').split('\t')[2])
mid = (start+end)/2
dataset.append(get_all_feats([chrom,mid-sample_length/2,mid+sample_length/2],genome,0))
f_neg.close()
return dataset
if __name__ == "__main__" :
parser = argparse.ArgumentParser(description='Deopen data generation')
parser.add_argument('-pos', dest='pos', type=str, help='input positive bed file')
parser.add_argument('-neg', dest='neg', type=str, help='input negative bed file')
parser.add_argument('-genome', dest='genome', type=str, help='genome file in fasta format')
parser.add_argument('-l', dest='length', type=int, default=1000, help='sequence length')
parser.add_argument('-out', dest='output', type=str, help='output file')
args = parser.parse_args()
dataset = generate_dataset(args.pos,args.neg,args.genome,args.length)
save_dataset(dataset,args.output)
| {
"content_hash": "99cb850999a015aad6afa1b26bc8b083",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 99,
"avg_line_length": 38.29347826086956,
"alnum_prop": 0.5994890718137951,
"repo_name": "kimmo1019/Deopen",
"id": "7bb8205d0dbc30776b13b59ab481fb6c196b13a5",
"size": "3523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Gen_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21393"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from sortedm2m.fields import SortedManyToManyField
class LightningTalk(models.Model):
title = models.CharField(_("Title"), max_length=255)
speakers = SortedManyToManyField(
"speakers.Speaker", related_name='lightning_talks', blank=True,
verbose_name=_("speakers"))
description = models.TextField(_("description"), blank=True, null=True)
slides_url = models.URLField(_("slides URL"), blank=True, null=True)
def __unicode__(self):
return self.title
| {
"content_hash": "621afde6b87edf5578392c18bd59a898",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 36.25,
"alnum_prop": 0.7086206896551724,
"repo_name": "pysv/djep",
"id": "3901208a2bf722404c276a5aed0c54caacdf0fb5",
"size": "580",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyconde/lightningtalks/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "254575"
},
{
"name": "HTML",
"bytes": "726728"
},
{
"name": "JavaScript",
"bytes": "225740"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Puppet",
"bytes": "2679"
},
{
"name": "Python",
"bytes": "683293"
},
{
"name": "Roff",
"bytes": "202148"
},
{
"name": "Ruby",
"bytes": "181"
},
{
"name": "Shell",
"bytes": "1393"
}
],
"symlink_target": ""
} |
import json
from pprint import pprint
'''
Find which program codes are not in the majors.json file
'''
def parse():
    codes = []
    with open("codes.txt", "r") as code_file:
        for line in code_file:
            codes.append(line.strip())
with open('../json/majors.json') as data_file:
data = json.load(data_file)
included = set()
for program in data:
curr = program['calLink'][-4:]
included.add(curr)
not_included = set()
for code in codes:
        code = code[5:9]  # the four-character program code is assumed to sit at positions 5-8
if code not in included:
not_included.add(code)
# print(not_included)
    for item in not_included:
        print(item)  # one code per line prints nicer to read
if __name__ == "__main__":
parse()
| {
"content_hash": "bf314626cb56609de41a86a86b0aed4c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 57,
"avg_line_length": 19.675675675675677,
"alnum_prop": 0.5686813186813187,
"repo_name": "patrickleweryharris/major-finder",
"id": "86acdbbff118d1fe86db7771a3b8cce12d51f07d",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parse/parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4949"
},
{
"name": "JavaScript",
"bytes": "121625"
},
{
"name": "Python",
"bytes": "728"
}
],
"symlink_target": ""
} |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from train_utils import batchify_data, run_epoch, train_model, Flatten
import utils_multiMNIST as U
path_to_data_dir = '../Datasets/'
use_mini_dataset = True
batch_size = 64
nb_classes = 10
nb_epoch = 30
num_classes = 10
img_rows, img_cols = 42, 28 # input image dimensions
class CNN(nn.Module):
def __init__(self, input_dimension):
super(CNN, self).__init__()
# TODO initialize model layers here
def forward(self, x):
# TODO use model layers to predict the two digits
return out_first_digit, out_second_digit
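# One possible way to fill in the TODOs above -- a sketch, not the assignment's
# reference solution; the layer sizes below are assumptions:
#   in __init__:
#       self.conv = nn.Sequential(
#           nn.Conv2d(1, 32, (3, 3)), nn.ReLU(), nn.MaxPool2d((2, 2)),
#           nn.Conv2d(32, 64, (3, 3)), nn.ReLU(), nn.MaxPool2d((2, 2)),
#           Flatten(), nn.Dropout(0.5))
#       self.fc_first = nn.Linear(64 * 9 * 5, num_classes)
#       self.fc_second = nn.Linear(64 * 9 * 5, num_classes)
#   in forward:
#       features = self.conv(x.view(-1, 1, img_rows, img_cols))
#       out_first_digit = self.fc_first(features)
#       out_second_digit = self.fc_second(features)
# With 42x28 inputs, two 3x3 convs and two 2x2 pools leave a 9x5 feature map,
# hence the 64 * 9 * 5 input size of the two linear heads.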
def main():
X_train, y_train, X_test, y_test = U.get_data(path_to_data_dir, use_mini_dataset)
# Split into train and dev
dev_split_index = int(9 * len(X_train) / 10)
X_dev = X_train[dev_split_index:]
y_dev = [y_train[0][dev_split_index:], y_train[1][dev_split_index:]]
X_train = X_train[:dev_split_index]
y_train = [y_train[0][:dev_split_index], y_train[1][:dev_split_index]]
    permutation = np.arange(len(X_train))
np.random.shuffle(permutation)
X_train = [X_train[i] for i in permutation]
y_train = [[y_train[0][i] for i in permutation], [y_train[1][i] for i in permutation]]
# Split dataset into batches
train_batches = batchify_data(X_train, y_train, batch_size)
dev_batches = batchify_data(X_dev, y_dev, batch_size)
test_batches = batchify_data(X_test, y_test, batch_size)
# Load model
input_dimension = img_rows * img_cols
model = CNN(input_dimension) # TODO add proper layers to CNN class above
# Train
train_model(train_batches, dev_batches, model)
## Evaluate the model on test data
loss, acc = run_epoch(test_batches, model.eval(), None)
print('Test loss1: {:.6f} accuracy1: {:.6f} loss2: {:.6f} accuracy2: {:.6f}'.format(loss[0], acc[0], loss[1], acc[1]))
if __name__ == '__main__':
# Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx
np.random.seed(12321) # for reproducibility
torch.manual_seed(12321) # for reproducibility
main()
| {
"content_hash": "c6d031ec0b264bab4edb2a146a0bc44d",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 126,
"avg_line_length": 33.58461538461538,
"alnum_prop": 0.6582684379294549,
"repo_name": "xunilrj/sandbox",
"id": "b3a9340a3008126b08a5a978777e695e25d37e30",
"size": "2183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courses/MITx/MITx 6.86x Machine Learning with Python-From Linear Models to Deep Learning/project2/mnist/part2-twodigit/conv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "235"
},
{
"name": "ASP.NET",
"bytes": "110"
},
{
"name": "Assembly",
"bytes": "28409"
},
{
"name": "Asymptote",
"bytes": "22978"
},
{
"name": "C",
"bytes": "1022035"
},
{
"name": "C#",
"bytes": "474510"
},
{
"name": "C++",
"bytes": "33387716"
},
{
"name": "CMake",
"bytes": "1288737"
},
{
"name": "CSS",
"bytes": "49690"
},
{
"name": "Common Lisp",
"bytes": "858"
},
{
"name": "Coq",
"bytes": "6200"
},
{
"name": "Dockerfile",
"bytes": "2912"
},
{
"name": "Elixir",
"bytes": "34"
},
{
"name": "Erlang",
"bytes": "8204"
},
{
"name": "F#",
"bytes": "33187"
},
{
"name": "Fortran",
"bytes": "20472"
},
{
"name": "GDB",
"bytes": "701"
},
{
"name": "GLSL",
"bytes": "7478"
},
{
"name": "Go",
"bytes": "8971"
},
{
"name": "HTML",
"bytes": "6469462"
},
{
"name": "Handlebars",
"bytes": "8236"
},
{
"name": "Haskell",
"bytes": "18581"
},
{
"name": "Java",
"bytes": "120539"
},
{
"name": "JavaScript",
"bytes": "5055335"
},
{
"name": "Jupyter Notebook",
"bytes": "1849172"
},
{
"name": "LLVM",
"bytes": "43431"
},
{
"name": "MATLAB",
"bytes": "462980"
},
{
"name": "Makefile",
"bytes": "1622666"
},
{
"name": "Objective-C",
"bytes": "2001"
},
{
"name": "PostScript",
"bytes": "45490"
},
{
"name": "PowerShell",
"bytes": "192867"
},
{
"name": "Python",
"bytes": "726138"
},
{
"name": "R",
"bytes": "31364"
},
{
"name": "Roff",
"bytes": "5700"
},
{
"name": "Ruby",
"bytes": "5865"
},
{
"name": "Rust",
"bytes": "797104"
},
{
"name": "Sage",
"bytes": "654"
},
{
"name": "Scala",
"bytes": "42383"
},
{
"name": "Shell",
"bytes": "154039"
},
{
"name": "TLA",
"bytes": "16779"
},
{
"name": "TSQL",
"bytes": "3412"
},
{
"name": "TeX",
"bytes": "6989202"
},
{
"name": "TypeScript",
"bytes": "8845"
},
{
"name": "Visual Basic .NET",
"bytes": "1090"
},
{
"name": "WebAssembly",
"bytes": "70321"
},
{
"name": "q",
"bytes": "13889"
}
],
"symlink_target": ""
} |
"""
Django settings for fitster project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@-g7o97pd-t95z1mn!&!&%f6&o=(((t#)-mb0ahh+%qal+n+6i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['52.11.65.78','127.0.0.1','localhost']
# Application definition
INSTALLED_APPS = [
'healthtracker.apps.HealthtrackerConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fitster.urls'
LOGIN_URL = '/healthtracker/'
LOGIN_REDIRECT_URL = '/healthtracker/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fitster.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'fitsterdb',
'USER': 'fitsterdb',
'PASSWORD': 'fitsterdb',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Password hashers
# https://docs.djangoproject.com/en/1.10/ref/settings/#password-hashers
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Istanbul'
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',             # '25.10.2006', '25.10.06'
'%d-%m-%Y', '%d/%m/%Y', '%d/%m/%y', # '25-10-2006', '25/10/2006', '25/10/06'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006',
)
USE_I18N = False
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Logging
# https://docs.djangoproject.com/en/1.10/topics/logging/#configuring-logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(asctime)s %(levelname)s %(module)s %(process)d %(thread)d %(message)s',
},
'simple': {
'format': '%(asctime)s %(levelname)s %(message)s',
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'django_debug.log'),
'formatter': 'simple',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
}
},
'loggers': {
'django': {
'handlers': ['file', 'console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
## Custom settings below this point
# API Key to access USDA API
#USDA_API_KEY = 'DEMO_KEY'
USDA_API_KEY = 'DtFfmTivnG1A1cJ6hr5o7CjE0WPZu8DcuqnfxWyD'
# Response format to be returned from USDA API
USDA_RESPONSE_FORMAT = 'json'
# USDA API base URL
USDA_BASE_URL = 'http://api.nal.usda.gov/ndb'
# Resource JSONs
ACTIVITY_JSON = 'activity.json'
ACTIVITY_GROUP_JSON = 'activityGroup.json'
NUTRIENTS_JSON = 'nutrients.json'
QUOTES_JSON = 'quotes.json'
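# Illustrative example of how the USDA settings above combine into a request
# URL (the '/search/' endpoint and the query term are assumptions; nothing in
# this file issues the request):
#   '{}/search/?format={}&q=butter&api_key={}'.format(
#       USDA_BASE_URL, USDA_RESPONSE_FORMAT, USDA_API_KEY)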
| {
"content_hash": "8b5c35dba3ba49c078001270f363e07a",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 96,
"avg_line_length": 26.866336633663366,
"alnum_prop": 0.6379215035931454,
"repo_name": "TalatCikikci/Fall2016Swe573_HealthTracker",
"id": "b033b148118b29087cb4532f86e706f24e8e05a6",
"size": "5427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fitster/fitster/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "368"
},
{
"name": "HTML",
"bytes": "27814"
},
{
"name": "Python",
"bytes": "56138"
}
],
"symlink_target": ""
} |
from toontown.building import DistributedBBElevatorAI
from toontown.building import FADoorCodes
from toontown.building.DistributedBoardingPartyAI import DistributedBoardingPartyAI
from toontown.coghq import DistributedCogKartAI
from toontown.hood import CogHQAI
from toontown.suit import DistributedBossbotBossAI
from toontown.toonbase import ToontownGlobals
class BossbotHQAI(CogHQAI.CogHQAI):
def __init__(self, air):
CogHQAI.CogHQAI.__init__(
self, air, ToontownGlobals.BossbotHQ, ToontownGlobals.BossbotLobby,
FADoorCodes.BB_DISGUISE_INCOMPLETE,
DistributedBBElevatorAI.DistributedBBElevatorAI,
DistributedBossbotBossAI.DistributedBossbotBossAI)
self.cogKarts = []
self.courseBoardingParty = None
self.startup()
def startup(self):
CogHQAI.CogHQAI.startup(self)
self.createCogKarts()
if simbase.config.GetBool('want-boarding-groups', True):
self.createCourseBoardingParty()
def createCogKarts(self):
posList = (
(154.762, 37.169, 0), (141.403, -81.887, 0),
(-48.44, 15.308, 0)
)
hprList = ((110.815, 0, 0), (61.231, 0, 0), (-105.481, 0, 0))
mins = ToontownGlobals.FactoryLaffMinimums[3]
for cogCourse in xrange(len(posList)):
pos = posList[cogCourse]
hpr = hprList[cogCourse]
cogKart = DistributedCogKartAI.DistributedCogKartAI(
self.air, cogCourse,
pos[0], pos[1], pos[2], hpr[0], hpr[1], hpr[2],
self.air.countryClubMgr, minLaff=mins[cogCourse])
cogKart.generateWithRequired(self.zoneId)
self.cogKarts.append(cogKart)
def createCourseBoardingParty(self):
cogKartIdList = []
for cogKart in self.cogKarts:
cogKartIdList.append(cogKart.doId)
self.courseBoardingParty = DistributedBoardingPartyAI(self.air, cogKartIdList, 4)
self.courseBoardingParty.generateWithRequired(self.zoneId)
| {
"content_hash": "5a0e4ee849101d24234b93ddcb0347b2",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 89,
"avg_line_length": 39.25,
"alnum_prop": 0.6658500734933855,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "6ab44ecaf9b705085ca8fa6c03a185d8f03ab605",
"size": "2041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/hood/BossbotHQAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
} |
from scout.commands import cli
from scout.server.extensions import store
def test_update_user(mock_app, user_obj):
"""Tests the CLI that updates a user"""
runner = mock_app.test_cli_runner()
assert runner
# Test CLI base, no arguments provided
result = runner.invoke(cli, ["update", "user"])
# it should return error message
assert "Missing option" in result.output
# Test CLI with wrong user
result = runner.invoke(cli, ["update", "user", "-u", "unknown_user_id"])
# it should return error message
    assert "User unknown_user_id could not be found" in result.output
# Test CLI with right user, update user role
# remove admin role first:
result = runner.invoke(cli, ["update", "user", "-u", user_obj["_id"], "--remove-admin"])
assert "INFO Updating user {}".format(user_obj["_id"]) in result.output
updated_user = store.user_collection.find_one()
assert "admin" not in updated_user["roles"]
# Test CLI to add admin role to user
result = runner.invoke(cli, ["update", "user", "-u", user_obj["_id"], "-r", "admin"])
assert "INFO Updating user {}".format(user_obj["_id"]) in result.output
updated_user = store.user_collection.find_one()
assert "admin" in updated_user["roles"]
# Test CLI to remove an institute from a user
result = runner.invoke(
cli, ["update", "user", "-u", user_obj["_id"], "--remove-institute", "cust000"]
)
assert "INFO Updating user {}".format(user_obj["_id"]) in result.output
updated_user = store.user_collection.find_one()
assert "cust000" not in updated_user["institutes"]
# Test CLI to add a institute to user's institutes
result = runner.invoke(cli, ["update", "user", "-u", user_obj["_id"], "-i", "cust000"])
assert "INFO Updating user {}".format(user_obj["_id"]) in result.output
updated_user = store.user_collection.find_one()
assert "cust000" in updated_user["institutes"]
| {
"content_hash": "c42d70e45dada939e2598fbdb00d5004",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 92,
"avg_line_length": 42.391304347826086,
"alnum_prop": 0.6533333333333333,
"repo_name": "Clinical-Genomics/scout",
"id": "96dca4fcd2dbcf01997583ee5416b864782f1389",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/commands/update/test_update_user_cmd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12516"
},
{
"name": "Dockerfile",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "911931"
},
{
"name": "JavaScript",
"bytes": "32692"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "2419990"
}
],
"symlink_target": ""
} |
"""Rasterio"""
from __future__ import absolute_import
from collections import namedtuple
import logging
try:
from logging import NullHandler
except ImportError: # pragma: no cover
class NullHandler(logging.Handler):
def emit(self, record):
pass
import warnings
from rasterio._base import (
eval_window, window_shape, window_index, gdal_version)
from rasterio.dtypes import (
bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,
complex_, check_dtype)
from rasterio.env import ensure_env, Env
from rasterio.compat import string_types
from rasterio.profiles import default_gtiff_profile
from rasterio.transform import Affine, guard_transform
from rasterio.vfs import parse_path
from rasterio import windows
# These modules are imported from the Cython extensions, but are also
# imported here to help tools like cx_Freeze find them automatically
from rasterio import _err, coords, enums, vfs
# Classes in rasterio._io are imported below just before we need them.
__all__ = [
'band', 'open', 'copy', 'pad']
__version__ = "0.36.0"
__gdal_version__ = gdal_version()
# Rasterio attaches NullHandler to the 'rasterio' logger and its
# descendents. See
# https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library
# Applications must attach their own handlers in order to see messages.
# See rasterio/rio/main.py for an example.
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
@ensure_env
def open(path, mode='r', driver=None, width=None, height=None,
count=None, crs=None, transform=None, dtype=None, nodata=None,
**kwargs):
"""Open file at ``path`` in ``mode`` 'r' (read), 'r+' (read and
write), or 'w' (write) and return a dataset Reader or Updater
object.
In write mode, a driver name such as "GTiff" or "JPEG" (see GDAL
docs or ``gdal_translate --help`` on the command line),
``width`` (number of pixels per line) and ``height`` (number of
lines), the ``count`` number of bands in the new file must be
specified. Additionally, the data type for bands such as
``rasterio.ubyte`` for 8-bit bands or ``rasterio.uint16`` for
16-bit bands must be specified using the ``dtype`` argument.
Parameters
----------
mode: string
"r" (read), "r+" (read/write), or "w" (write)
driver: string
driver code specifying the format name (e.g. "GTiff" or
"JPEG"). See GDAL docs at
http://www.gdal.org/formats_list.html (optional, required
for writing).
width: int
number of pixels per line
(optional, required for write)
height: int
number of lines
(optional, required for write)
count: int > 0
number of bands
(optional, required for write)
dtype: rasterio.dtype
the data type for bands such as ``rasterio.ubyte`` for
8-bit bands or ``rasterio.uint16`` for 16-bit bands
(optional, required for write)
crs: dict or string
Coordinate reference system
(optional, recommended for write)
transform: Affine instance
Affine transformation mapping the pixel space to geographic
space (optional, recommended for writing).
nodata: number
Defines pixel value to be interpreted as null/nodata
(optional, recommended for write)
Returns
-------
A ``DatasetReader`` or ``DatasetUpdater`` object.
Notes
-----
In write mode, you must specify at least ``width``, ``height``,
``count`` and ``dtype``.
A coordinate reference system for raster datasets in write mode
can be defined by the ``crs`` argument. It takes Proj4 style
mappings like
.. code::
{'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
'no_defs': True}
An affine transformation that maps ``col,row`` pixel coordinates
to ``x,y`` coordinates in the coordinate reference system can be
specified using the ``transform`` argument. The value should be
an instance of ``affine.Affine``
.. code:: python
>>> from affine import Affine
>>> transform = Affine(0.5, 0.0, -180.0, 0.0, -0.5, 90.0)
These coefficients are shown in the figure below.
.. code::
| x | | a b c | | c |
| y | = | d e f | | r |
| 1 | | 0 0 1 | | 1 |
a: rate of change of X with respect to increasing column,
i.e. pixel width
b: rotation, 0 if the raster is oriented "north up"
c: X coordinate of the top left corner of the top left pixel
d: rotation, 0 if the raster is oriented "north up"
e: rate of change of Y with respect to increasing row,
usually a negative number (i.e. -1 * pixel height) if
north-up.
f: Y coordinate of the top left corner of the top left pixel
A 6-element sequence of the affine transformation matrix
coefficients in ``c, a, b, f, d, e`` order, (i.e. GDAL
geotransform order) will be accepted until 1.0 (deprecated).
A virtual filesystem can be specified. The ``vfs`` parameter may
    be an Apache Commons VFS style string beginning with "zip://" or
    "tar://". In this case, the ``path`` must be an absolute path
within that container.
"""
if not isinstance(path, string_types):
raise TypeError("invalid path: {0!r}".format(path))
if mode and not isinstance(mode, string_types):
raise TypeError("invalid mode: {0!r}".format(mode))
if driver and not isinstance(driver, string_types):
raise TypeError("invalid driver: {0!r}".format(driver))
if dtype and not check_dtype(dtype):
raise TypeError("invalid dtype: {0!r}".format(dtype))
if transform:
transform = guard_transform(transform)
elif 'affine' in kwargs:
affine = kwargs.pop('affine')
transform = guard_transform(affine)
# Get AWS credentials if we're attempting to access a raster
# on S3.
pth, archive, scheme = parse_path(path)
if scheme == 's3':
Env().get_aws_credentials()
log.debug("AWS credentials have been obtained")
# Create dataset instances and pass the given env, which will
# be taken over by the dataset's context manager if it is not
# None.
if mode == 'r':
from rasterio._io import RasterReader
s = RasterReader(path)
elif mode == 'r+':
from rasterio._io import writer
s = writer(path, mode)
elif mode == 'r-':
from rasterio._base import DatasetReader
s = DatasetReader(path)
elif mode == 'w':
from rasterio._io import writer
s = writer(path, mode, driver=driver,
width=width, height=height, count=count,
crs=crs, transform=transform, dtype=dtype,
nodata=nodata, **kwargs)
else:
raise ValueError(
"mode string must be one of 'r', 'r+', or 'w', not %s" % mode)
s.start()
return s
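# Example usage (illustrative; 'example.tif' is a placeholder path):
#   with rasterio.open('example.tif') as src:
#       band1 = src.read(1)  # first band as a numpy ndarray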
@ensure_env
def copy(src, dst, **kw):
"""Copy a source raster to a new destination with driver specific
creation options.
Parameters
----------
src: string
an existing raster file
dst: string
valid path to output file.
Returns
-------
None
Raises
------
ValueError:
If source path is not a valid Dataset
Notes
-----
A ``driver`` keyword argument with value like 'GTiff' or 'JPEG' is
used to control the output format.
This is the one way to create write-once files like JPEGs.
"""
from rasterio._copy import RasterCopier
return RasterCopier()(src, dst, **kw)
def drivers(**kwargs):
"""Create a gdal environment with registered drivers and creation
options.
This function is deprecated; please use ``env.Env`` instead.
Parameters
----------
    **kwargs: keyword arguments
Configuration options that define GDAL driver behavior
See https://trac.osgeo.org/gdal/wiki/ConfigOptions
Returns
-------
GDALEnv responsible for managing the environment.
Notes
-----
Use as a context manager, ``with rasterio.drivers(): ...``
"""
warnings.warn("Deprecated; Use env.Env instead", DeprecationWarning)
return Env(**kwargs)
Band = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])
def band(ds, bidx):
"""Wraps a dataset and a band index up as a 'Band'
Parameters
----------
ds: rasterio.RasterReader
Open rasterio dataset
bidx: int
Band number, index starting at 1
Returns
-------
a rasterio.Band
"""
return Band(
ds,
bidx,
set(ds.dtypes).pop(),
ds.shape)
def pad(array, transform, pad_width, mode=None, **kwargs):
"""pad array and adjust affine transform matrix.
Parameters
----------
array: ndarray
Numpy ndarray, for best results a 2D array
transform: Affine transform
transform object mapping pixel space to coordinates
pad_width: int
number of pixels to pad array on all four
mode: str or function
define the method for determining padded values
Returns
-------
(array, transform): tuple
Tuple of new array and affine transform
Notes
-----
See numpy docs for details on mode and other kwargs:
http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.pad.html
"""
import numpy as np
transform = guard_transform(transform)
padded_array = np.pad(array, pad_width, mode, **kwargs)
padded_trans = list(transform)
padded_trans[2] -= pad_width * padded_trans[0]
padded_trans[5] -= pad_width * padded_trans[4]
return padded_array, Affine(*padded_trans[:6])
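# For instance (hypothetical values): padding a 2x2 array by one pixel moves
# the origin one pixel up and to the left in map units.
#   import numpy as np
#   from affine import Affine
#   padded, t = pad(np.zeros((2, 2)), Affine.identity(), 1, 'constant')
#   # padded.shape == (4, 4); t.c == -1.0 and t.f == -1.0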
def get_data_window(arr, nodata=None):
warnings.warn("Deprecated; Use rasterio.windows instead", DeprecationWarning)
return windows.get_data_window(arr, nodata)
def window_union(data):
warnings.warn("Deprecated; Use rasterio.windows instead", DeprecationWarning)
return windows.union(data)
def window_intersection(data):
warnings.warn("Deprecated; Use rasterio.windows instead", DeprecationWarning)
return windows.intersection(data)
def windows_intersect(data):
warnings.warn("Deprecated; Use rasterio.windows instead", DeprecationWarning)
return windows.intersect(data)
| {
"content_hash": "a61cba56b36e4d24cc28a456aa766a64",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 81,
"avg_line_length": 31.726993865030675,
"alnum_prop": 0.6440104418447259,
"repo_name": "ryfeus/lambda-packs",
"id": "556e5c754edf56592bbee0d6e3a4f7a958028626",
"size": "10343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/rasterio/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
import gc
import os
from builtins import object
from past.utils import old_div
import tensorflow as tf
import numpy as np
import deep_architect.core as co
from deep_architect.helpers.tensorflow_eager_support import set_is_training
tfe = tf.contrib.eager
class ENASEvaluator:
    """Trains a shared-weight child model and evaluates candidate
    architectures for an ENAS-style controller. Alternates between
    updating the shared child parameters on the training set for one
    epoch and reporting validation accuracy to the controller for a
    fixed number of controller steps.
    """
def __init__(self,
train_dataset,
val_dataset,
num_classes,
weight_sharer,
optimizer_type='adam',
batch_size=128,
learning_rate_init=1e-3,
display_step=50,
log_output_to_terminal=True,
test_dataset=None,
max_controller_steps=50):
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.num_classes = num_classes
self.in_dim = list(train_dataset.next_batch(1)[0].shape[1:])
self.display_step = display_step
self.learning_rate_init = learning_rate_init
self.batch_size = batch_size
self.optimizer_type = optimizer_type
self.log_output_to_terminal = log_output_to_terminal
self.test_dataset = test_dataset
self.num_batches = int(
old_div(self.train_dataset.get_num_examples(), self.batch_size))
self.batch_counter = 0
self.epoch = 0
self.controller_step = 0
self.child_step = 0
self.controller_mode = False
self.max_controller_steps = max_controller_steps
self.weight_sharer = weight_sharer
if self.optimizer_type == 'adam':
self.optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate_init)
elif self.optimizer_type == 'sgd':
self.optimizer = tf.train.GradientDescentOptimizer(
learning_rate=self.learning_rate_init)
elif self.optimizer_type == 'sgd_mom':
self.optimizer = tf.train.MomentumOptimizer(
learning_rate=self.learning_rate_init, momentum=0.99)
else:
raise ValueError("Unknown optimizer.")
def save_state(self, folderpath):
weight_sharer_file = os.path.join(folderpath, "weight_sharer")
self.weight_sharer.save(weight_sharer_file)
def load_state(self, folderpath):
weight_sharer_file = os.path.join(folderpath, "weight_sharer")
self.weight_sharer.load(weight_sharer_file)
def _compute_accuracy(self, inputs, outputs, dataset):
nc = 0
num_left = dataset.get_num_examples()
set_is_training(outputs, False)
loss = 0
while num_left > 0:
X_batch, y_batch = dataset.next_batch(self.batch_size)
X = tf.convert_to_tensor(X_batch, np.float32) #.gpu()
y = tf.convert_to_tensor(y_batch, np.float32) #.gpu()
co.forward({inputs['in']: X})
logits = outputs['out'].val
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
num_correct = tf.reduce_sum(tf.cast(correct_prediction, "float"))
loss += tf.reduce_sum(
tf.nn.softmax_cross_entropy_with_logits(logits=logits,
labels=y))
nc += num_correct
# update the number of examples left.
eff_batch_size = y_batch.shape[0]
num_left -= eff_batch_size
acc = old_div(float(nc), dataset.get_num_examples())
loss = old_div(float(loss), dataset.get_num_examples())
return acc, loss
def _compute_loss(self, inputs, outputs, X, y, loss_metric):
X = tf.constant(X).gpu()
y = tf.constant(y).gpu()
co.forward({inputs['in']: X})
logits = outputs['out'].val
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
loss_metric(loss)
return loss
def eval(self, inputs, outputs):
results = {}
if self.controller_mode:
# Compute accuracy of model
val_acc, loss = self._compute_accuracy(inputs, outputs,
self.val_dataset)
results['validation_accuracy'] = val_acc
# Log validation info
self.controller_step += 1
if self.log_output_to_terminal: # and self.controller_step % self.display_step == 0:
log_string = ""
log_string += "ctrl_step={:<6d}".format(self.controller_step)
log_string += " loss={:<7.3f}".format(loss)
log_string += " acc={:<6.4f}".format(val_acc)
print(log_string)
# If controller phase finished, update epoch and switch back to
# updating child params
if self.controller_step % self.max_controller_steps == 0:
self.controller_mode = False
self.epoch += 1
results['epoch'] = self.epoch
print('Starting Image model mode')
else:
# Update child model parameters
X_batch, y_batch = self.train_dataset.next_batch(self.batch_size)
set_is_training(outputs, True)
with tf.device('/gpu:0'):
loss_metric = tfe.metrics.Mean('loss')
self.optimizer.minimize(lambda: self._compute_loss(
inputs, outputs, X_batch, y_batch, loss_metric))
# Log batch info
self.child_step += 1
if self.log_output_to_terminal and self.child_step % self.display_step == 0:
log_string = ""
log_string += "epoch={:<6d}".format(self.epoch)
log_string += " ch_step={:<6d}".format(self.child_step)
log_string += " loss={:<8.6f}".format(loss_metric.result())
print(log_string)
epoch_end = self.train_dataset.iter_i == 0
# If epoch completed, switch to updating controller
results['validation_accuracy'] = -1
if epoch_end:
self.controller_mode = True
print('Starting Controller Mode')
gc.collect()
return results
| {
"content_hash": "0e57b46b1e5c4b65b61df360db80e44c",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 97,
"avg_line_length": 39.24404761904762,
"alnum_prop": 0.5696951311997573,
"repo_name": "negrinho/deep_architect",
"id": "cb20a897e0ce3fa0bb12341703714b3e822bc2d1",
"size": "6593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/enas/evaluator/enas_evaluator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "572577"
},
{
"name": "Shell",
"bytes": "11377"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'ProjectMember.member'
db.alter_column('cms_projectmember', 'member_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Member'], null=True))
def backwards(self, orm):
# Changing field 'ProjectMember.member'
db.alter_column('cms_projectmember', 'member_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['cms.Member']))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.blogpost': {
'Meta': {'ordering': "['-date']", 'object_name': 'BlogPost'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Member']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'edit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.TextField', [], {}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'blogposts'", 'blank': 'True', 'to': "orm['cms.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.member': {
'Meta': {'ordering': "['user__last_name']", 'object_name': 'Member'},
'blurb': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classification': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'hometown': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'interests': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'cms.news': {
'Meta': {'object_name': 'News'},
'content': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.page': {
'Meta': {'object_name': 'Page'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_front_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pub_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.project': {
'Meta': {'object_name': 'Project'},
'category': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Member']", 'through': "orm['cms.ProjectMember']", 'symmetrical': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'cms.projectmember': {
'Meta': {'object_name': 'ProjectMember'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coordinator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Member']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Project']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'volunteer_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'cms.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cms']
| {
"content_hash": "c06c118c9b0076ab0d696a70c67557a6",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 182,
"avg_line_length": 69.832,
"alnum_prop": 0.541871921182266,
"repo_name": "ncsu-stars/Stars-CMS",
"id": "e4cb7726b5b81970c0f6b56c02f2ec91e68b44ae",
"size": "8747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms/migrations/0018_auto__chg_field_projectmember_member.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "12004"
},
{
"name": "HTML",
"bytes": "60895"
},
{
"name": "JavaScript",
"bytes": "60468"
},
{
"name": "Python",
"bytes": "247430"
}
],
"symlink_target": ""
} |
"""
Demonstrate dragging items from the app into the graph widget, wired up
through the events bridge. This is similar to the hello world sample.
"""
import sys
from PyQt5 import QtWidgets
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QDrag
from PyQt5.QtWidgets import QGridLayout
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QWidget
import qmxgraph.mime
from qmxgraph.widget import EventsBridge
from qmxgraph.widget import QmxGraph
def create_drag_button(text, qmx_style, parent=None):
button = DragButton(parent)
button.setText(text)
# # You can set an icon to the button with:
# button.setIcon(...)
button.setProperty('qmx_style', qmx_style)
button.setToolTip("Drag me into the graph widget")
return button
class DragButton(QPushButton):
"""
    Start a drag event carrying custom MIME data.
"""
def mousePressEvent(self, event):
mime_data = qmxgraph.mime.create_qt_mime_data(
{
'vertices': [
{
'dx': 0,
'dy': 0,
'width': 120,
'height': 40,
'label': self.text(),
'style': self.property('qmx_style'),
}
]
}
)
drag = QDrag(self)
drag.setMimeData(mime_data)
# # You can set icons like the following:
# w, h = self.property('component_size')
# # Image displayed while dragging.
# drag.setPixmap(self.icon().pixmap(w, h))
# # Position of the image where the mouse is centered.
# drag.setHotSpot(QPoint(w // 2, h // 2)
drag.exec_()
class DragAndDropWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setProperty('name', 'adas')
self.setMinimumSize(QSize(640, 480))
self.setWindowTitle("Drag&Drop Styles")
central_widget = QWidget(self)
self.setCentralWidget(central_widget)
self.button_pane = QWidget(self)
self.button_pane.setEnabled(False)
red_button = create_drag_button('RED', 'fillColor=#D88', self.button_pane)
green_button = create_drag_button('GREEN', 'fillColor=#8D8', self.button_pane)
blue_button = create_drag_button('BLUE', 'fillColor=#88D', self.button_pane)
self.graph_widget = QmxGraph(parent=central_widget)
self.events_bridge = self.create_events_bridge()
self.graph_widget.loadFinished.connect(self.graph_load_handler)
main_layout = QGridLayout(self)
central_widget.setLayout(main_layout)
main_layout.addWidget(self.graph_widget, 0, 0)
main_layout.addWidget(self.button_pane, 0, 1)
buttons_layout = QVBoxLayout(self.button_pane)
self.button_pane.setLayout(buttons_layout)
buttons_layout.addWidget(red_button)
buttons_layout.addWidget(green_button)
buttons_layout.addWidget(blue_button)
def create_events_bridge(self):
##################################
# Based in `EventsBridge` docstring.
def on_cells_added_handler(cell_ids):
print(f'added {cell_ids}')
qmx = widget.api
for cid in cell_ids:
label = qmx.get_label(cid)
qmx.set_label(cid, f'{label} ({cid})')
def on_terminal_changed_handler(cell_id, terminal_type, new_terminal_id, old_terminal_id):
print(
f'{terminal_type} of {cell_id} changed from'
f' {old_terminal_id} to {new_terminal_id}'
)
def on_cells_removed_handler(cell_ids):
print(f'removed {cell_ids}')
def on_cells_bounds_changed_handler(changed_cell_bounds):
print(f'cells bounds changed {changed_cell_bounds}')
events_bridge = EventsBridge()
widget = self.graph_widget
widget.set_events_bridge(events_bridge)
events_bridge.on_cells_added.connect(on_cells_added_handler)
events_bridge.on_cells_removed.connect(on_cells_removed_handler)
events_bridge.on_terminal_changed.connect(on_terminal_changed_handler)
events_bridge.on_cells_bounds_changed.connect(on_cells_bounds_changed_handler)
#
##################################
return events_bridge
def graph_load_handler(self, is_loaded):
self.button_pane.setEnabled(is_loaded)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWin = DragAndDropWindow()
mainWin.show()
sys.exit(app.exec_())
| {
"content_hash": "574ffac662c726732b7125deacd21c43",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 98,
"avg_line_length": 33.62589928057554,
"alnum_prop": 0.6050492083868207,
"repo_name": "ESSS/qmxgraph",
"id": "47945ee9c91833613f9582cd77c0fbbd4fe95637",
"size": "4674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/drag_and_drop/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1123"
},
{
"name": "HTML",
"bytes": "2717"
},
{
"name": "JavaScript",
"bytes": "108354"
},
{
"name": "Makefile",
"bytes": "2276"
},
{
"name": "Python",
"bytes": "259282"
},
{
"name": "Shell",
"bytes": "173"
}
],
"symlink_target": ""
} |
import os
import json
from datetime import datetime
from flask import Flask
from flask import render_template, url_for, request, jsonify
from flask import send_from_directory
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug.utils import secure_filename
ALLOWED_EXTENSIONS = ['JPEG', 'JPG', 'PNG', 'GIF']
app = Flask(__name__)
# Setup routing for static files
app.jinja_env.globals['static'] = (
lambda filename: url_for('static', filename=filename))
# Setup folder paths for uploads and SQL
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.getcwd() + '/tmp/store.db'
app.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), 'tmp/uploads')
# Instantiate db session
db = SQLAlchemy(app)
# Models
class Zone(db.Model):
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String(80))
shape = db.Column(db.Text) # store as a json blob
extras = db.Column(db.Text) # store extra stuff as a blob
floor_id = db.Column(db.Integer, db.ForeignKey('floor.id'))
floor = db.relationship('Floor',
backref=db.backref('zones', lazy='dynamic'))
    def __init__(self, label, shape, floor, extras=None):
        self.shape = shape
        self.label = label
        self.floor = floor
        # Store the provided extras blob; fall back to an empty JSON object.
        self.extras = extras if extras is not None else '{}'
def __repr__(self):
return '<Zone %r>' % self.label
def toObj(self):
d = {}
d['label'] = self.label
d['shape'] = json.loads(self.shape)
return d
class Floor(db.Model):
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String(50))
img_name = db.Column(db.String(80))
extras = db.Column(db.Text) # store extra stuff as a blob
    def __init__(self, label, img_name, extras=None):
        self.label = label
        self.img_name = img_name
        self.extras = extras if extras is not None else ''
def __repr__(self):
return '<Floor %r>' % self.label
def toObj(self):
z = []
for zone in self.zones:
z.append(zone.toObj())
return {
'label': self.label,
'imgname': self.img_name,
'zones': z
}
# Misc
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].upper() in ALLOWED_EXTENSIONS
# Routes
@app.route('/')
def list_floors():
floors = Floor.query.all()
return render_template('floor_list.html', floors=floors)
@app.route('/create', methods=['GET', 'POST'])
def create_new_floor():
if request.method == 'GET':
return render_template('create.html')
else:
# Otherwise it's a POST request
try:
data = json.loads(request.data)
# request.data should be a Floor JSON object
# see static/js/models.js
fid = process_floor_json(data)
return jsonify(success=True, floorId=fid)
except:
return jsonify(success=False, msg='Invalid Floor JSON')
def process_floor_json(data):
floor = Floor(data['label'], data['img'])
db.session.add(floor)
for zone in data['zones']:
shape_json = json.dumps(zone['shape'])
z = Zone(zone['label'], shape_json, floor, zone['extras'])
db.session.add(z)
db.session.commit()
return floor.id
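# Illustrative shape of the Floor JSON consumed above (field names inferred
# from this function; see static/js/models.js for the authoritative schema):
#   {"label": "First floor", "img": "plan.png",
#    "zones": [{"label": "Lobby", "shape": {...}, "extras": "{}"}]}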
@app.route('/view/<floor_id>')
def get_floor_data(floor_id):
try:
floor = Floor.query.filter(Floor.id == int(floor_id)).first()
if floor:
return render_template('view.html', floor=floor)
else:
return render_template('not_found.html')
except:
return render_template('not_found.html')
@app.route('/fetch/floor/<floor_id>')
def fetch_floor_data(floor_id):
try:
floor = Floor.query.filter(Floor.id == int(floor_id)).first()
if floor:
return jsonify(success=True, floor=floor.toObj(), msg='')
else:
return jsonify(success=False, msg='Could not find floor')
except:
return jsonify(success=False, msg='Could not find floor')
@app.route('/up', methods=['POST'])
def upload_file():
if request.method == 'POST':
filename = secure_filename(request.headers.get('X-File-Name'))
if not allowed_file(filename):
return jsonify(success=False, msg='Invalid image format')
filename = datetime.now().strftime('%Y%m%d%H%M%S%f') + '-' + filename
try:
f = open(os.path.join(app.config['UPLOAD_FOLDER'], filename), 'w')
f.write(request.data)
f.close()
return jsonify(success=True, imgname=filename)
except:
return jsonify(success=False, msg='Could not save file')
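# Illustrative upload request (the header name comes from the code above;
# the file name is a placeholder):
#   curl -X POST http://localhost:5000/up \
#        -H 'X-File-Name: plan.png' --data-binary @plan.png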
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
if __name__ == '__main__':
app.run(debug=True, port=5000)
| {
"content_hash": "43bded2ea3a197eb737bb519fcd7d779",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 84,
"avg_line_length": 28.662790697674417,
"alnum_prop": 0.598580121703854,
"repo_name": "SoftwareDefinedBuildings/Building-Mapper",
"id": "e63524cebcc6aed28fed35e42a758f1ad5b042f6",
"size": "4930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8342"
},
{
"name": "JavaScript",
"bytes": "196430"
},
{
"name": "Python",
"bytes": "4987"
},
{
"name": "Shell",
"bytes": "141"
}
],
"symlink_target": ""
} |
from django import http
from mox import IsA
from horizon import api
from horizon import context_processors
from horizon import middleware
from horizon import test
class ContextProcessorTests(test.TestCase):
def setUp(self):
super(ContextProcessorTests, self).setUp()
self._prev_catalog = self.request.user.service_catalog
context_processors.horizon = self._real_horizon_context_processor
def tearDown(self):
super(ContextProcessorTests, self).tearDown()
self.request.user.service_catalog = self._prev_catalog
def test_authorized_tenants(self):
tenant_list = self.context['authorized_tenants']
self.request.user.authorized_tenants = None # Reset from setUp
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.tenant_list_for_token(IsA(http.HttpRequest),
self.token.id,
endpoint_type='internalURL') \
.AndReturn(tenant_list)
self.mox.ReplayAll()
middleware.HorizonMiddleware().process_request(self.request)
context = context_processors.horizon(self.request)
self.assertEqual(len(context['authorized_tenants']), 1)
tenant = context['authorized_tenants'].pop()
self.assertEqual(tenant.id, self.tenant.id)
| {
"content_hash": "639b2c8a265ddaf3c325b3a7af6b83da",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 39.6764705882353,
"alnum_prop": 0.6604892512972572,
"repo_name": "developerworks/horizon",
"id": "30102282b257579fc0419106f60c85ae73e88852",
"size": "2158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horizon/tests/context_processor_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "154893"
},
{
"name": "Python",
"bytes": "697221"
},
{
"name": "Shell",
"bytes": "11065"
}
],
"symlink_target": ""
} |
"""SumgMug OAuth client."""
from typing import Optional
import signal
import socket
import subprocess
import sys
import threading
from urllib import parse
import webbrowser
from dataclasses import dataclass
import bottle
import rauth
import requests_oauthlib
OAUTH_ORIGIN = 'https://secure.smugmug.com'
REQUEST_TOKEN_URL = OAUTH_ORIGIN + '/services/oauth/1.0a/getRequestToken'
ACCESS_TOKEN_URL = OAUTH_ORIGIN + '/services/oauth/1.0a/getAccessToken'
AUTHORIZE_URL = OAUTH_ORIGIN + '/services/oauth/1.0a/authorize'
API_ORIGIN = 'https://api.smugmug.com'
class Error(Exception):
"""Base class for all exception of this module."""
class LoginError(Error):
"""Raised on login errors."""
@dataclass
class ApiKey:
"""A SmugMug API key."""
key: str
secret: str
@dataclass
class RequestToken:
"""An OAuth request token."""
token: str
secret: str
@dataclass
class AccessToken:
"""An OAuth access token."""
token: str
secret: str
@dataclass
class _State:
running: bool
port: int
app: bottle.Bottle
request_token: Optional[RequestToken] = None
access_token: Optional[AccessToken] = None
class SmugMugOAuth():
"""SumgMug OAuth client."""
def __init__(self, api_key: ApiKey):
self._service = self._create_service(api_key)
def _get_free_port(self) -> int:
sock = socket.socket()
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
return port
def request_access_token(self) -> AccessToken:
"""Request an OAuth access token for the SmugMug service."""
port = self._get_free_port()
state = _State(running=True, port=port, app=bottle.Bottle())
state.app.route('/', callback=lambda s=state: self._index(s))
state.app.route('/callback', callback=lambda s=state: self._callback(s))
def abort(signum, frame):
del signum, frame # Unused.
print('SIGINT received, aborting...')
state.app.close()
state.running = False
sys.exit(1)
signal.signal(signal.SIGINT, abort)
def _start_web_server() -> None:
bottle.run(state.app, port=port)
thread = threading.Thread(target=_start_web_server)
thread.daemon = True
try:
thread.start()
login_url = f'http://localhost:{port}/'
print('Started local server.')
print(f'Visit {login_url} to grant SmugCli access to your SmugMug '
'account.')
print(f'Opening {login_url} in default browser...')
if self._is_cygwin():
try:
return_code = subprocess.call(['cygstart', login_url],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
success = (return_code == 0)
except Exception: # pylint: disable=broad-except
success = False
else:
success = webbrowser.open(login_url)
if not success:
print('Could not start default browser automatically.')
print(f'Please visit {login_url} to complete login process.')
while state.running and thread.is_alive():
thread.join(1)
finally:
state.app.close()
if state.access_token is None:
raise LoginError("Failed requesting access token.")
return state.access_token
def get_oauth(
self, access_token: AccessToken
) -> requests_oauthlib.OAuth1:
"""Returns an OAuth1 instance."""
return requests_oauthlib.OAuth1(
self._service.consumer_key,
self._service.consumer_secret,
resource_owner_key=access_token.token,
resource_owner_secret=access_token.secret)
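    # Typical usage (illustrative; the key/secret values are placeholders):
    #   oauth = SmugMugOAuth(ApiKey(key='...', secret='...'))
    #   access_token = oauth.request_access_token()
    #   auth = oauth.get_oauth(access_token)  # e.g. requests.get(url, auth=auth)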
def _create_service(self, api_key: ApiKey) -> rauth.OAuth1Service:
return rauth.OAuth1Service(
name='smugcli',
consumer_key=api_key.key,
consumer_secret=api_key.secret,
request_token_url=REQUEST_TOKEN_URL,
access_token_url=ACCESS_TOKEN_URL,
authorize_url=AUTHORIZE_URL,
base_url=API_ORIGIN + '/api/v2')
def _index(self, state: _State) -> None:
"""Route initiating the authorization process."""
request_token, request_token_secret = self._service.get_request_token(
params={'oauth_callback': f'http://localhost:{state.port}/callback'})
state.request_token = RequestToken(
token=request_token, secret=request_token_secret)
auth_url = self._service.get_authorize_url(request_token)
auth_url = self._add_auth_params(
auth_url, access='Full', permissions='Modify')
bottle.redirect(auth_url)
def _callback(self, state: _State) -> str:
"""Route invoked after the user completes the authorization request."""
if state.request_token is None:
raise LoginError("No request token obtained.")
oauth_verifier = bottle.request.query['oauth_verifier'] # type: ignore
(token, secret) = self._service.get_access_token(
state.request_token.token, state.request_token.secret,
params={'oauth_verifier': oauth_verifier})
state.access_token = AccessToken(token, secret)
state.app.close()
state.running = False
return 'Login successful. You may close this window.'
def _add_auth_params(
self, auth_url: str, access: str, permissions: str
) -> str:
parts = parse.urlsplit(auth_url)
query = parse.parse_qsl(parts.query, True)
query.append(('Access', access))
query.append(('Permissions', permissions))
new_query = parse.urlencode(query, True)
return parse.urlunsplit(
(parts.scheme, parts.netloc, parts.path, new_query, parts.fragment))
def _is_cygwin(self) -> bool:
try:
return_code = subprocess.call(['which', 'cygstart'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return return_code == 0
except Exception: # pylint: disable=broad-except
return False
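# Illustrative usage of the class above (added sketch, not part of smugcli's
# shipped docs; the key/secret values are placeholders):
#
#   oauth = SmugMugOAuth(ApiKey(key='my-key', secret='my-secret'))
#   access_token = oauth.request_access_token()  # opens a browser for consent
#   auth = oauth.get_oauth(access_token)
#   # `auth` can then be passed as the `auth=` argument of `requests` calls.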
| {
"content_hash": "8347bc8e9578f45666093bd990e1f0fe",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 77,
"avg_line_length": 29.851282051282052,
"alnum_prop": 0.6488575846074558,
"repo_name": "graveljp/smugcli",
"id": "62af6a6b96a6fa78d86a03b24e9e27163395a027",
"size": "5821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smugcli/smugmug_oauth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223689"
}
],
"symlink_target": ""
} |
from content import Content
__author__ = 'carlos'
from bottle import post, request, run  # or route
@post('/content/')  # Bottle routes must start with '/', otherwise they never match
def create():
    new_content = Content.from_json(request.json['cmd'])  # renamed: don't shadow the `content` module
    return "OK"
run(host='localhost', port=8080, debug=True)
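# Illustrative request against the endpoint above (assumes the server is
# running locally; the payload under 'cmd' is whatever Content.from_json
# expects, so treat it as a placeholder):
#
#   curl -X POST http://localhost:8080/content/ \
#        -H 'Content-Type: application/json' \
#        -d '{"cmd": {"title": "hello"}}'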
| {
"content_hash": "111045c3c032e15c0723410aae7fc863",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 54,
"avg_line_length": 20.142857142857142,
"alnum_prop": 0.6950354609929078,
"repo_name": "distributed-cms/rest-api",
"id": "c361d9bb51d2ee6bb2fa898eb5d70ef6202df5b3",
"size": "282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "srv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "1394"
},
{
"name": "Python",
"bytes": "1399"
},
{
"name": "Shell",
"bytes": "410"
}
],
"symlink_target": ""
} |
import sys
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from ...utils import get_indices, create_indices, rebuild_indices, delete_indices
try:
raw_input
except NameError:
raw_input = input
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
class ESCommandError(CommandError):
pass
class Command(BaseCommand):
    help = 'Manage Elasticsearch indexes: list, initialize, rebuild or clean up.'
def add_arguments(self, parser):
parser.add_argument('--list', action='store_true', dest='list', default=False)
parser.add_argument('--initialize', action='store_true', dest='initialize', default=False)
parser.add_argument('--rebuild', action='store_true', dest='rebuild', default=False)
parser.add_argument('--cleanup', action='store_true', dest='cleanup', default=False)
parser.add_argument('--no_input', '--noinput', action='store_true', dest='no_input', default=False)
parser.add_argument('--indexes', action='store', dest='indexes', default='')
def handle(self, *args, **options):
no_input = options.get('no_input')
requested_indexes = options.get('indexes', '') or []
if requested_indexes:
requested_indexes = requested_indexes.split(',')
if options.get('list'):
self.subcommand_list()
elif options.get('initialize'):
self.subcommand_initialize(requested_indexes, no_input)
elif options.get('rebuild'):
self.subcommand_rebuild(requested_indexes, no_input)
elif options.get('cleanup'):
self.subcommand_cleanup(requested_indexes, no_input)
def subcommand_list(self):
print("Available ES indexes:")
for index_name, type_classes in get_indices().items():
print(" - index '{0}':".format(index_name))
for type_class in type_classes:
print(" - type '{0}'".format(type_class.get_type_name()))
def subcommand_initialize(self, indexes=None, no_input=False):
user_input = 'y' if no_input else ''
while user_input != 'y':
user_input = raw_input('Are you sure you want to initialize {0} index(es)? [y/N]: '.format('the ' + ', '.join(indexes) if indexes else '**ALL**')).lower()
if user_input == 'n':
break
if user_input == 'y':
sys.stdout.write("Creating ES indexes: ")
results, aliases = create_indices(indices=indexes)
sys.stdout.write("complete.\n")
for alias, index in aliases:
print("'{0}' aliased to '{1}'".format(alias, index))
def subcommand_cleanup(self, indexes=None, no_input=False):
user_input = 'y' if no_input else ''
while user_input != 'y':
user_input = raw_input('Are you sure you want to clean up (ie DELETE) {0} index(es)? [y/N]: '.format('the ' + ', '.join(indexes) if indexes else '**ALL**')).lower()
if user_input == 'n':
break
if user_input == 'y':
sys.stdout.write("Deleting ES indexes: ")
indices = delete_indices(indices=indexes)
sys.stdout.write("complete.\n")
for index in indices:
print("'{0}' index deleted".format(index))
        else:
            print("You chose not to clean up indices.")
def subcommand_rebuild(self, indexes, no_input=False):
if getattr(settings, 'DEBUG', False):
import warnings
warnings.warn('Rebuilding with `settings.DEBUG = True` can result in out of memory crashes. See https://docs.djangoproject.com/en/stable/ref/settings/#debug', stacklevel=2)
# make sure the user continues explicitly after seeing this warning
no_input = False
user_input = 'y' if no_input else ''
while user_input != 'y':
user_input = raw_input('Are you sure you want to rebuild {0} index(es)? [y/N]: '.format('the ' + ', '.join(indexes) if indexes else '**ALL**')).lower()
if user_input in ['n', '']:
break
if user_input == 'y':
sys.stdout.write("Rebuilding ES indexes: ")
results, aliases = rebuild_indices(indices=indexes)
sys.stdout.write("complete.\n")
for alias, index in aliases:
print("'{0}' rebuilt and aliased to '{1}'".format(alias, index))
else:
print("You chose not to rebuild indices.")
| {
"content_hash": "e10a121befd7ef1a1e6dddd782e36616",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 184,
"avg_line_length": 41.415929203539825,
"alnum_prop": 0.5918803418803419,
"repo_name": "jaddison/django-simple-elasticsearch",
"id": "f9ad91efa8e93cfeda4d76a8e9d1040aee41bd51",
"size": "4680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_elasticsearch/management/commands/es_manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "Python",
"bytes": "51219"
}
],
"symlink_target": ""
} |
_remote_command = lambda magic_word: 'remote:{}'.format(magic_word)
class Message(object):
def __init__(self, content, additional = None):
self.__content = content
self.__additional = additional
@property
def content(self):
return self.__content
@property
def additional(self):
return self.__additional
class RemoteSignal(object):
PING = _remote_command('ping')
PAUSE = _remote_command('pause')
RESUME = _remote_command('resume')
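# Illustrative values produced by the helpers above:
#   RemoteSignal.PING                        -> 'remote:ping'
#   Message('job done', {'id': 42}).content  -> 'job done'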
| {
"content_hash": "36e14bb48d8336f40244a7401700c031",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 26.526315789473685,
"alnum_prop": 0.628968253968254,
"repo_name": "shiroyuki/vireo",
"id": "dcd02670fb6e490767e37e3b46e457edc9424afe",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vireo/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "978"
},
{
"name": "Python",
"bytes": "118263"
}
],
"symlink_target": ""
} |
import socket
import bluetooth
import threading
class ServerWalle:
def __init__(self,host,port):
self.host = host
self.port = port
self.bt_address = '00:15:FF:F3:E2:09'
self.bt_port = 1
self.buff_bt = 1024
self.buff_sock = 1024
self.run_thread = True
self.blue_sock = None
self.sock_server = None
self.server_thread = None
self.client = None
self.connectBot()
self.startServer()
def connectBot(self):
self.blue_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.blue_sock.connect((self.bt_address,self.bt_port))
print "Walle Connected"
def startServer(self):
self.sock_server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.sock_server.bind((self.host,self.port))
self.sock_server.listen(5)
self.server_thread = threading.Thread(target=self.serveForever)
self.server_thread.start()
def serveForever(self):
(self.client, address) = self.sock_server.accept()
print "Cliente Conectado"
print address
while self.run_thread:
cmd = self.client.recv(self.buff_sock)
self.processMessage(cmd)
self.client.close()
print "Coneccion cerrada"
    def processMessage(self, msg):
        # All two-letter motion commands plus DIST/MSG are forwarded verbatim
        # to the robot over Bluetooth, and the robot's reply is relayed back
        # to the TCP client.
        forward_commands = ("AA", "BB", "CC", "DD", "EE", "FF", "SS", "DIST", "MSG")
        if msg in forward_commands:
            self.blue_sock.send(msg)
            res = self.blue_sock.recv(self.buff_bt)
            self.client.send(res)
        elif msg == "CLOSE":
            self.run_thread = False
            self.client.send("connection close")
def main():
server = ServerWalle('192.168.1.38',8080)
if __name__ == '__main__':
main()
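# Illustrative client session against the server above (Python 2 syntax to
# match this module; host/port are the values hard-coded in main()):
#
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(('192.168.1.38', 8080))
#   s.send("DIST")        # ask the robot for a distance reading
#   print s.recv(1024)
#   s.send("CLOSE")       # tells the server to stop its serve loop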
| {
"content_hash": "4a54afce14a7b8346019eea44c901b05",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 75,
"avg_line_length": 31.967032967032967,
"alnum_prop": 0.5503609487796494,
"repo_name": "GeorgEncinas/Multi-Agents",
"id": "0ccfca7aa6663daf23c10a8431af4f6cb101ad4e",
"size": "2909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serverpi/ServerWalle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "3437"
},
{
"name": "Python",
"bytes": "38025"
}
],
"symlink_target": ""
} |
import os
import json
import subprocess
from error_codes import *
from errors import error_info, is_error, print_errors
from helpers import general_info, geninfo_lookup
from .check_endpts import check_internet_connect, check_ama_endpts
from .check_imds import check_imds_api
def check_parameters():
global general_info
try:
with open('/etc/default/azuremonitoragent', 'r') as fp:
for line in fp:
line = line.split('export')[1].strip()
key = line.split('=')[0]
value = line.split('=')[1]
general_info[key] = value
    except (FileNotFoundError, AttributeError, IndexError) as e:
error_info.append((e,))
return ERR_AMA_PARAMETERS
return NO_ERROR
def check_workspace():
global general_info
dir_path = '/etc/opt/microsoft/azuremonitoragent/config-cache/configchunks'
dcr_workspace = set()
dcr_region = set()
me_region = set()
general_info['URL_SUFFIX'] = '.com'
try:
for file in os.listdir(dir_path):
file_path = dir_path + "/" + file
with open(file_path) as f:
result = json.load(f)
channels = result['channels']
for channel in channels:
if channel['protocol'] == 'ods':
# parse dcr workspace id
endpoint_url = channel['endpoint']
                        workspace_id = endpoint_url.split('https://')[1].split('.ods')[0]
                        dcr_workspace.add(workspace_id)
# parse dcr region
token_endpoint_uri = channel['tokenEndpointUri']
region = token_endpoint_uri.split('Location=')[1].split('&')[0]
dcr_region.add(region)
# parse url suffix
if '.us' in endpoint_url:
general_info['URL_SUFFIX'] = '.us'
if '.cn' in endpoint_url:
general_info['URL_SUFFIX'] = '.cn'
if channel['protocol'] == 'me':
# parse ME region
endpoint_url = channel['endpoint']
region = endpoint_url.split('https://')[1].split('.monitoring')[0]
me_region.add(region)
except (FileNotFoundError, AttributeError) as e:
error_info.append((e,))
return ERR_NO_DCR
general_info['DCR_WORKSPACE_ID'] = dcr_workspace
general_info['DCR_REGION'] = dcr_region
general_info['ME_REGION'] = me_region
return NO_ERROR
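# Illustrative shape of a DCR config chunk consumed by check_workspace()
# (field names inferred from the parsing above; real files carry more keys):
#
#   {
#     "channels": [
#       {"protocol": "ods",
#        "endpoint": "https://<workspace-id>.ods.opinsights.azure.com",
#        "tokenEndpointUri": "https://...?Location=eastus&..."},
#       {"protocol": "me",
#        "endpoint": "https://eastus.monitoring.azure.com"}
#     ]
#   }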
def check_subcomponents():
services = ['azuremonitoragent', 'azuremonitor-agentlauncher', 'azuremonitor-coreagent']
if len(geninfo_lookup('ME_REGION')) > 0:
services.append('metrics-sourcer')
services.append('metrics-extension')
for service in services:
try:
status = subprocess.check_output(['systemctl', 'status', service],\
universal_newlines=True, stderr=subprocess.STDOUT)
status_lines = status.split('\n')
for line in status_lines:
line = line.strip()
if line.startswith('Active:'):
if not line.split()[1] == 'active':
error_info.append((service, status))
return ERR_SUBCOMPONENT_STATUS
except subprocess.CalledProcessError as e:
error_info.append((e,))
return ERR_CHECK_STATUS
return NO_ERROR
def check_connection(interactive, err_codes=True, prev_success=NO_ERROR):
print("CHECKING CONNECTION...")
success = prev_success
# check /etc/default/azuremonitoragent file
print("Checking AMA parameters in /etc/default/azuremonitoragent...")
checked_parameters = check_parameters()
if (is_error(checked_parameters)):
return print_errors(checked_parameters)
else:
success = print_errors(checked_parameters)
# check DCR
print("Checking DCR...")
checked_workspace = check_workspace()
if (is_error(checked_workspace)):
return print_errors(checked_workspace)
else:
success = print_errors(checked_workspace)
# check general internet connectivity
print("Checking if machine is connected to the internet...")
checked_internet_connect = check_internet_connect()
if (is_error(checked_internet_connect)):
return print_errors(checked_internet_connect)
else:
success = print_errors(checked_internet_connect)
# check if AMA endpoints connected
print("Checking if machine can connect to Azure Monitor control-plane and data ingestion endpoints...")
checked_ama_endpts = check_ama_endpts()
if (is_error(checked_ama_endpts)):
return print_errors(checked_ama_endpts)
else:
success = print_errors(checked_ama_endpts)
# check if subcomponents are active (e.g. mdsd, telegraf, etc)
print("Checking if subcomponents have been started...")
checked_subcomponents = check_subcomponents()
if (is_error(checked_subcomponents)):
return print_errors(checked_subcomponents)
else:
success = print_errors(checked_subcomponents)
print("Checking if IMDS metadata and MSI tokens are available...")
checked_imds_api = check_imds_api()
if (is_error(checked_imds_api)):
return print_errors(checked_imds_api)
else:
success = print_errors(checked_imds_api)
return success
| {
"content_hash": "17ae5bcb3cf5c9f4694d843cab59d0ea",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 107,
"avg_line_length": 39.816901408450704,
"alnum_prop": 0.5795896710293598,
"repo_name": "Azure/azure-linux-extensions",
"id": "e4600caed3919cadf31b534dba75b5d98ad4c944",
"size": "5654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AzureMonitorAgent/ama_tst/modules/connect/connect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "81542"
},
{
"name": "C++",
"bytes": "1038973"
},
{
"name": "CMake",
"bytes": "11642"
},
{
"name": "Dockerfile",
"bytes": "1539"
},
{
"name": "Go",
"bytes": "136483"
},
{
"name": "HTML",
"bytes": "32736"
},
{
"name": "JavaScript",
"bytes": "22883"
},
{
"name": "Makefile",
"bytes": "11405"
},
{
"name": "PowerShell",
"bytes": "22400"
},
{
"name": "Python",
"bytes": "5124041"
},
{
"name": "Roff",
"bytes": "3827"
},
{
"name": "Shell",
"bytes": "66718"
}
],
"symlink_target": ""
} |
import re, subprocess, os
from typing import Callable, Tuple, Union, cast, Dict, Any
from utils import getProjPath, runSubprocess
TEXCOUNT_PATH = "../../TeXcount_3_1/texcount.pl"
def parseOutput(data: str, regex: str) -> Dict[str, Any]:
s, s2 = None, None
if "Subcounts:" in data:
s, s2 = data.split("Subcounts:\n")
else:
s = data
res = {
"chapters": []
} #type: dict
for l in re.findall(regex, s, re.U):
# (sumc, text, headers, outside, headersN, floatsN, mathsI, mathsD) = l
res["total"] = l
if s2:
for l in re.findall(r"^ ([0-9]+)\+([0-9]+)\+([0-9]+) \(([0-9]+)\/([0-9]+)\/([0-9]+)\/([0-9]+)\) ([\w: äöü]+)", s2, re.U|re.M):
# (text, headers, captions, headersH, floatsH, inlinesH, displayedH, name) = l
res["chapters"].append((l[7],) + l[:7])
return res
def correctLetters(letters: dict, words: dict) -> None:
for k, v in letters.items():
if k in words:
if k == "chapters":
for ci, cv in enumerate(v):
letters[k][ci] = tuple((int(ccv) + int(words[k][ci][cci]) if not isinstance(ccv, str) else ccv) for cci, ccv in enumerate(cv))
else:
letters[k] = tuple(int(cv) + int(words[k][ci]) for ci, cv in enumerate(v))
lettersR = r"Sum count: ([0-9]+)\nLetters in text: ([0-9]+)\nLetters in headers: ([0-9]+)\nLetters in captions: ([0-9]+)\nNumber of headers: ([0-9]+)\nNumber of floats/tables/figures: ([0-9]+)\nNumber of math inlines: ([0-9]+)\nNumber of math displayed: ([0-9]+)"
wordsR = r"Sum count: ([0-9]+)\nWords in text: ([0-9]+)\nWords in headers: ([0-9]+)\nWords outside text \(captions, etc\.\): ([0-9]+)\nNumber of headers: ([0-9]+)\nNumber of floats\/tables\/figures: ([0-9]+)\nNumber of math inlines: ([0-9]+)\nNumber of math displayed: ([0-9]+)"
def count(path: str, buildPath: str, fileName: str) -> Tuple[bool, Union[dict, str]]:
if not os.path.isfile(path+"/"+fileName):
return (False, "File not found: '"+path+"/"+fileName+"'")
cmd = [
TEXCOUNT_PATH, # "-incbib",
"-merge", "-utf8", "-sum", "-relaxed", "-nocol", "-dir="+path+"/", "-auxdir="+buildPath+"/", path+"/"+fileName
]
try:
wordsOut = subprocess.check_output(cmd).decode("utf-8", errors="ignore")
if "File not found" in wordsOut:
raise subprocess.CalledProcessError(1, cmd, wordsOut)
words = parseOutput(wordsOut, wordsR)
lettersOut = subprocess.check_output(cmd + ["-chars"]).decode("utf-8", errors="ignore")
letters = parseOutput(lettersOut, lettersR)
correctLetters(letters, words)
    except (subprocess.CalledProcessError, OSError, ValueError) as exc:
        if isinstance(exc, OSError):
            return (False, str(exc))
        else:
            return (False, cast(subprocess.CalledProcessError, exc).output)
else:
return (True,{
"words": words,
"letters": letters
})
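# Illustrative call of count() (paths and file name are hypothetical):
#
#   ok, result = count("/projects/thesis", "/projects/thesis/build", "main.tex")
#   if ok:
#       print(result["words"]["total"])  # tuple of TeXcount totals
#   else:
#       print("TeXcount failed: " + result)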
def copyFolderStructure(src: str, target: str) -> None:
for root, subFolders, _ in os.walk(src):
subFolders[:] = [d for d in subFolders if not d[0] == '.']
f = "."+root[len(src):]
try:
os.mkdir(os.path.join(target, f), 0o755)
except OSError:
pass
def doCompile(proj: str, buildPath: str, cfg: dict, log: Callable[[str], None]) -> bool:
successful = True
copyFolderStructure(getProjPath(proj), buildPath)
main = cfg.get("main", None)
if main:
cmd = ["latexmk",
"-interaction=nonstopmode",
# "-gg",
"-file-line-error",
"-outdir="+buildPath,
"-pdf", main+".tex" ]
env = {
"max_print_line": "100",
"error_line": "254",
"half_error_line": "238"
}
log(">>> " + (" ".join(cmd)) + "\n")
rv = runSubprocess(cmd, log, cwd=getProjPath(proj), env=env)
if rv != 0:
log("latexmk failed: "+str(rv) + "\n")
successful = False
else:
log("Missing 'main' in config")
successful = False
return successful
| {
"content_hash": "8876dcf4a09206104a535ca383a67ebb",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 278,
"avg_line_length": 33.387387387387385,
"alnum_prop": 0.6181867242309768,
"repo_name": "mischnic/python-ci",
"id": "024bb6e67cd2d0c105a7af0c746c280a7a3abf33",
"size": "3733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/src/latex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9837"
},
{
"name": "HTML",
"bytes": "1391"
},
{
"name": "JavaScript",
"bytes": "114364"
},
{
"name": "Python",
"bytes": "25826"
},
{
"name": "Shell",
"bytes": "1770"
}
],
"symlink_target": ""
} |
from django import forms
from django.forms.util import ValidationError
from django.utils.translation import ugettext as _
from selectfilter.forms import FilteredSelectMultiple, SelectBoxFilter, HyperLinksFilter
from selectfilter import utils
class AjaxManyToManyField(forms.ModelMultipleChoiceField):
"""
Base many to many form field that display filter choices using
JQuery ajax requests.
"""
def __init__(self, model, lookups, default_index=0, select_related=None,
widget=FilteredSelectMultiple, filter_widget=SelectBoxFilter, *args, **kwargs):
"""
model: the related model
lookups: a sequence of (label, lookup_dict) that tells how to
filter the objects
e.g. (
('active', {'is_active': True}),
('inactive', {'is_active': False}),
)
you may specify what you want in lookup_dict, give multiple
filter lookups for the same choice and also set a choice that
gets all unfiltered objects
e.g. (
('some stuff', {
'field1__startswith': 'a',
'field2': 'value'
}),
('all stuff', {}),
)
default_index: the index of the lookup sequence that will
be the default choice when the field is initially displayed.
set to None if you want the widget to start empty
select_related: if not None the resulting querydict is performed
using select_related(select_related), allowing foreign keys
to be retrieved (e.g. useful when the unicode representation
of the model objects contains references to foreign keys)
It is possible to pass all the other args and kwargs accepted by
the django field class.
"""
# get the default index and queryset
# queryset is empty if default index is None
if default_index is None:
queryset = model.objects.none()
else:
lookups_list = utils.getLookups(lookups)
if default_index >= len(lookups_list):
default_index = 0
lookup_dict = lookups_list[default_index][1]
# get the queryset
queryset = utils.getObjects(model, lookup_dict, select_related)
# call the parent constructor
super(AjaxManyToManyField, self
).__init__(queryset, widget=widget, *args, **kwargs)
# populate widget with some data
self.widget.lookups = self.lookups = lookups
self.widget.model = self.model = model
self.widget.select_related = select_related
self.widget.filter_widget = filter_widget()
self.widget.default_index = default_index
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'])
elif not self.required and not value:
return []
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'])
final_values = []
# if there is only one lookup used to limit choices, then a real
# validation over that limited choices is performed
lookups_list = utils.getLookups(self.lookups)
limit_choices_to = {} if len(lookups_list) != 1 else lookups_list[0][1]
for val in value:
try:
obj = self.model.objects.get(pk=val, **limit_choices_to)
except self.model.DoesNotExist:
raise ValidationError(self.error_messages['invalid_choice'] % val)
else:
final_values.append(obj)
return final_values
def _byLetterFactory(parent):
"""
Factory function returning a ManyToMany or ForeignKey field with
filters based on initials of a field of the objects.
parent can be AjaxManyToManyField or AjaxForeignKeyField.
"""
class ByLetter(parent):
"""
Ajax filtered field that displays filters based on
initials of a field of the objects, as they are typed by the user.
"""
def __init__(self, model, field_name="name", *args, **kwargs):
"""
model: the related model
            field_name: the name of the field whose initial letter
                is used for filtering
It is possible to pass all the other args and kwargs accepted by
parent ajax filtered field.
"""
import string
lookup_key = "%s__istartswith" % field_name
lookups = [(i, {lookup_key: i}) for i in string.lowercase]
# other non-letter records
regex_lookup_key = "%s__iregex" % field_name
lookups.append((_('other'), {regex_lookup_key: "^[^a-z]"}))
super(ByLetter, self).__init__(model, lookups, *args, **kwargs)
return ByLetter
ManyToManyByLetter = _byLetterFactory(AjaxManyToManyField)
def _byStatusFactory(parent):
"""
Factory function returning a ManyToMany or ForeignKey field with
filters based on activation status of the object.
parent can be AjaxManyToManyField or AjaxForeignKeyField.
"""
class ByStatus(parent):
"""
Ajax filtered field that displays filters based on
activation status of the objects.
"""
def __init__(self, model, field_name="is_active", *args, **kwargs):
"""
model: the related model
field_name: the name of the field that
manages the activation of the object
It is possible to pass all the other args and kwargs accepted by
parent ajax filtered field.
"""
lookups = (
(_('active'), {field_name: True}),
(_('inactive'), {field_name: False}),
(_('all'), {}),
)
super(ByStatus, self).__init__(model, lookups, *args, **kwargs)
return ByStatus
ManyToManyByStatus = _byStatusFactory(AjaxManyToManyField)
def _byRelatedFieldFactory(parent):
"""
Factory function returning a ManyToMany or ForeignKey field with
filters based on a related field (foreign key or many to many) of the
object. parent can be AjaxManyToManyField or AjaxForeignKeyField.
"""
class ByRelatedField(parent):
"""
Ajax filtered field that displays filters based on a related field
(foreign key or many to many) of the object.
"""
def __init__(self, model, field_name, include_blank=False, filter_not_used=False, *args, **kwargs):
"""
model: the related model
field_name: the name of the field representing the relationship
between the model and the related model
include_blank: if not False is displayed a NULL choice for
objects without relation (field_name__isnull=True).
The label of the choice must be specified as string.
It is possible to pass all the other args and kwargs accepted by
parent ajax filtered field.
"""
field = model._meta.get_field(field_name)
attname = "%s__pk" % field_name
def lookups():
"""
Return the lookups dict. This is needed because the lookups
may change as consequence of database changes at runtime.
"""
choices = field.get_choices(include_blank=include_blank)
if filter_not_used:
filtered_pk_set = set(model.objects.exclude(**{field_name: None}).values_list(field_name, flat=True))
choices = [choice for choice in choices if choice[0] in filtered_pk_set]
lookups_ = [(label, {attname: pk}) for pk, label in choices if pk]
# add the blank choice lookup
if include_blank:
attname_isnull = "%s__isnull" % field_name
lookups_.append((include_blank, {attname_isnull: True}))
# add the all objects lookup
lookups_.insert(0, ('-', {}))
return lookups_
super(ByRelatedField, self).__init__(model, lookups, *args, **kwargs)
return ByRelatedField
ManyToManyByRelatedField = _byRelatedFieldFactory(AjaxManyToManyField)
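# Illustrative form declarations using the factory-produced fields above
# (the Author and Tag models are hypothetical):
#
#   class ArticleForm(forms.Form):
#       authors = ManyToManyByLetter(Author, field_name="last_name")
#       editors = ManyToManyByStatus(Author, field_name="is_active")
#       tags = ManyToManyByRelatedField(Tag, field_name="category")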
| {
"content_hash": "53cc6a0fb7213fef5a9ff4b7b5334028",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 106,
"avg_line_length": 35.35820895522388,
"alnum_prop": 0.7049387927395525,
"repo_name": "gista/django-selectfilter",
"id": "712dadbe1664c27583751d429512536f98e7b9ea",
"size": "7131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selectfilter/forms/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10262"
},
{
"name": "Python",
"bytes": "17785"
}
],
"symlink_target": ""
} |
"""This module contains an object that represents a location to which a chat is connected."""
from typing import TYPE_CHECKING, Optional
from telegram._files.location import Location
from telegram._telegramobject import TelegramObject
from telegram._utils.types import JSONDict
if TYPE_CHECKING:
from telegram import Bot
class ChatLocation(TelegramObject):
"""This object represents a location to which a chat is connected.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`location` is equal.
Args:
location (:class:`telegram.Location`): The location to which the supergroup is connected.
Can't be a live location.
address (:obj:`str`): Location address; 1-64 characters, as defined by the chat owner
Attributes:
location (:class:`telegram.Location`): The location to which the supergroup is connected.
address (:obj:`str`): Location address, as defined by the chat owner
"""
__slots__ = ("location", "address")
def __init__(
self,
location: Location,
address: str,
*,
api_kwargs: JSONDict = None,
):
super().__init__(api_kwargs=api_kwargs)
self.location = location
self.address = address
self._id_attrs = (self.location,)
@classmethod
def de_json(cls, data: Optional[JSONDict], bot: "Bot") -> Optional["ChatLocation"]:
"""See :meth:`telegram.TelegramObject.de_json`."""
data = cls._parse_data(data)
if not data:
return None
data["location"] = Location.de_json(data.get("location"), bot)
return super().de_json(data=data, bot=bot)
| {
"content_hash": "e2347ee410dc925a08a118db6fa02acf",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 97,
"avg_line_length": 32.148148148148145,
"alnum_prop": 0.6532258064516129,
"repo_name": "tzpBingo/github-trending",
"id": "f32867235592a5a92e8bb3d18b306f30f4a7b35e",
"size": "2545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/telegram/_chatlocation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
} |
"""
This module is part of the nmeta suite running on top of Ryu SDN controller.
It provides a policy class as an interface to policy configuration and
classification of packets against policy.
See Policy class docstring for more information.
"""
import sys
import os
import datetime
#*** nmeta imports:
import tc_static
import tc_identity
import tc_custom
#*** Voluptuous to verify inputs against schema:
from voluptuous import Schema, Optional, Any, All, Required, Extra
from voluptuous import Invalid, MultipleInvalid, Range
#*** Import netaddr for MAC and IP address checking:
from netaddr import IPAddress
from netaddr import IPNetwork
from netaddr import EUI
#*** YAML for config and policy file parsing:
import yaml
#*** Regular Expressions:
import re
#*** For logging configuration:
from baseclass import BaseClass
#================== Functions (need to come first):
def validate(logger, data, schema, where):
"""
Generic validation of a data structure against schema
using Voluptuous data validation library
Parameters:
- logger: valid logger reference
- data: structure to validate
- schema: a valid Voluptuous schema
    - where: string for debugging purposes to identify the policy location
"""
logger.debug("validating data=%s", data)
try:
#*** Check correctness of data against schema with Voluptuous:
schema(data)
except MultipleInvalid as exc:
#*** There was a problem with the data:
logger.critical("Voluptuous detected a problem where=%s, exception=%s",
where, exc)
sys.exit("Exiting nmeta. Please fix error in main_policy.yaml")
return 1
def validate_port_set_list(logger, port_set_list, policy):
"""
Validate that a list of dictionaries [{'port_set': str}]
reference valid port_sets. Return Boolean 1 if good otherwise
exit with exception
"""
for port_set_dict in port_set_list:
found = 0
for port_set in policy.port_sets.port_sets_list:
if port_set.name == port_set_dict['port_set']:
found = 1
if not found:
logger.critical("Undefined port_set=%s", port_set_dict['port_set'])
sys.exit("Exiting nmeta. Please fix error in main_policy.yaml")
return 1
def validate_location(logger, location, policy):
"""
Validator for location compliance (i.e. check that the supplied
location string exists as a location defined in policy)
Return Boolean True if good, otherwise exit with exception
"""
for policy_location in policy.locations.locations_list:
if policy_location.name == location:
return True
logger.critical("Undefined location=%s", location)
sys.exit("Exiting nmeta. Please fix error in main_policy.yaml")
def validate_type(expected_type, value, msg):
    """
    Used for Voluptuous schema validation.
    Check a value is correct type, otherwise raise Invalid exception,
    including elaborated version of msg
    """
    try:
        return expected_type(value)
    except ValueError:
        msg = msg + ", value=" + value + ", expected type=" + expected_type.__name__
        raise Invalid(msg)
def transform_ports(ports):
"""
Passed a ports specification and return a list of
port numbers for easy searching.
Example:
Ports specification "1-3,5,66" becomes list [1,2,3,5,66]
"""
result = []
ports = str(ports)
for part in ports.split(','):
if '-' in part:
part_a, part_b = part.split('-')
part_a, part_b = int(part_a), int(part_b)
result.extend(range(part_a, part_b + 1))
else:
part_a = int(part)
result.append(part_a)
return result
def validate_ports(ports):
"""
Custom Voluptuous validator for a list of ports.
Example good ports specification:
1-3,5,66
Will raise Voluptuous Invalid exception if types or
ranges are not correct
"""
msg = 'Ports specification contains non-integer value'
msg2 = 'Ports specification contains invalid range'
#*** Cast to String:
ports = str(ports)
#*** Split into components separated by commas:
for part in ports.split(','):
#*** Handle ranges:
if '-' in part:
part_a, part_b = part.split('-')
#*** can they be cast to integer?:
validate_type(int, part_a, msg)
validate_type(int, part_b, msg)
#*** In a port range, part_b must be larger than part_a:
if not int(part_b) > int(part_a):
raise Invalid(msg2)
else:
#*** can it be cast to integer?:
validate_type(int, part, msg)
return ports
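#*** Illustrative inputs for the two port helpers above (added examples):
#***   transform_ports("1-3,5,66")  -> [1, 2, 3, 5, 66]
#***   validate_ports("1-3,5,66")   -> "1-3,5,66" (accepted)
#***   validate_ports("5-2")        -> raises Invalid (range must ascend)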
def validate_time_of_day(time_of_day):
"""
Custom Voluptuous validator for time of day compliance.
Returns original time of day if compliant, otherwise
raises Voluptuous Invalid exception
"""
msg1 = 'Invalid time of day start'
msg2 = 'Invalid time of day finish'
timeformat = "%H:%M"
(time_of_day1, time_of_day2) = time_of_day.split('-')
    try:
        datetime.datetime.strptime(time_of_day1, timeformat)
    except ValueError:
        raise Invalid(msg1)
    try:
        datetime.datetime.strptime(time_of_day2, timeformat)
    except ValueError:
        raise Invalid(msg2)
return time_of_day
def validate_macaddress(mac_addr):
"""
Custom Voluptuous validator for MAC address compliance.
Returns original MAC address if compliant, otherwise
raises Voluptuous Invalid exception
"""
msg = 'Invalid MAC address'
try:
result = EUI(mac_addr)
if result.version != 48:
raise Invalid(msg)
except:
raise Invalid(msg)
return mac_addr
def validate_macaddress_OLD(mac_addr):
"""
Custom Voluptuous validator for MAC address compliance.
Returns original MAC address if compliant, otherwise
raises Voluptuous Invalid exception
"""
msg = 'Invalid MAC address'
try:
if not re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", mac_addr.lower()):
raise Invalid(msg)
except:
raise Invalid(msg)
return mac_addr
def validate_ip_space(ip_addr):
"""
Custom Voluptuous validator for IP address compliance.
Can be IPv4 or IPv6 and can be range or have CIDR mask.
Returns original IP address if compliant, otherwise
raises Voluptuous Invalid exception
"""
msg = 'Invalid IP address'
#*** Does it look like a CIDR network?:
if "/" in ip_addr:
try:
if not IPNetwork(ip_addr):
raise Invalid(msg)
except:
raise Invalid(msg)
return ip_addr
#*** Does it look like an IP range?:
elif "-" in ip_addr:
ip_range = ip_addr.split("-")
if len(ip_range) != 2:
raise Invalid(msg)
try:
if not (IPAddress(ip_range[0]) and IPAddress(ip_range[1])):
raise Invalid(msg)
except:
raise Invalid(msg)
#*** Check second value in range greater than first value:
if IPAddress(ip_range[0]).value >= IPAddress(ip_range[1]).value:
raise Invalid(msg)
#*** Check both IP addresses are the same version:
if IPAddress(ip_range[0]).version != \
IPAddress(ip_range[1]).version:
raise Invalid(msg)
return ip_addr
else:
#*** Or is it just a plain simple IP address?:
try:
if not IPAddress(ip_addr):
raise Invalid(msg)
except:
raise Invalid(msg)
return ip_addr
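#*** Illustrative inputs for validate_ip_space (added examples):
#***   validate_ip_space('10.1.0.0/24')          -> accepted (CIDR network)
#***   validate_ip_space('10.1.0.1-10.1.0.15')   -> accepted (ascending range)
#***   validate_ip_space('10.1.0.15-10.1.0.1')   -> raises Invalid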
def validate_ethertype(ethertype):
"""
Custom Voluptuous validator for ethertype compliance.
Can be in hex (starting with 0x) or decimal.
Returns ethertype if compliant, otherwise
raises Voluptuous Invalid exception
"""
msg = 'Invalid EtherType'
if str(ethertype)[:2] == '0x':
#*** Looks like hex:
try:
if not (int(ethertype, 16) > 0 and \
int(ethertype, 16) < 65536):
raise Invalid(msg)
except:
raise Invalid(msg)
else:
#*** Perhaps it's decimal?
try:
if not (int(ethertype) > 0 and \
int(ethertype) < 65536):
raise Invalid(msg)
except:
raise Invalid(msg)
return ethertype
#================= Voluptuous Schema for Validating Policy
#*** Voluptuous schema for top level keys in the main policy:
TOP_LEVEL_SCHEMA = Schema({
Required('tc_rules'):
{Extra: object},
Required('qos_treatment'):
{Extra: object},
Required('port_sets'):
{Extra: object},
Required('locations'):
{Extra: object}
})
#*** Voluptuous schema for tc_rules branch of main policy:
TC_RULES_SCHEMA = Schema([{Extra: object}])
#*** Voluptuous schema for a tc_rule:
TC_RULE_SCHEMA = Schema({
Optional('comment'):
str,
Required('match_type'):
Required(Any('any', 'all', 'none')),
Required('conditions_list'):
[{Extra: object}],
Required('actions'):
{Extra: object}
})
#*** Voluptuous schema for a tc condition:
TC_CONDITION_SCHEMA = Schema({
Required('match_type'):
Required(Any('any', 'all', 'none')),
Required('classifiers_list'):
[{Extra: object}]
})
#*** Voluptuous schema for a tc classifier:
TC_CLASSIFIER_SCHEMA = Schema({
Optional('location_src'): str,
Optional('time_of_day'): validate_time_of_day,
Optional('eth_src'): validate_macaddress,
Optional('eth_dst'): validate_macaddress,
Optional('ip_src'): validate_ip_space,
Optional('ip_dst'): validate_ip_space,
Optional('tcp_src'): All(int, Range(min=0, max=65535)),
Optional('tcp_dst'): All(int, Range(min=0, max=65535)),
Optional('udp_src'): All(int, Range(min=0, max=65535)),
Optional('udp_dst'): All(int, Range(min=0, max=65535)),
Optional('eth_type'): validate_ethertype,
Optional('identity_lldp_systemname'): str,
Optional('identity_lldp_systemname_re'): str,
Optional('identity_dhcp_hostname'): str,
Optional('identity_dhcp_hostname_re'): str,
Optional('identity_service_dns'): str,
Optional('identity_service_dns_re'): str,
Optional('custom'): str
})
#*** Voluptuous schema for tc actions:
TC_ACTIONS_SCHEMA = Schema({
Optional('drop'): Any('at_controller',
'at_controller_and_switch'),
Optional('qos_treatment'): Any('default_priority',
'constrained_bw',
'high_priority',
'low_priority',
'classifier_return'),
Required('set_desc'): str
})
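#*** Illustrative tc_rule snippet that satisfies the schemas above
#*** (values are examples only, not from the shipped policy):
#***
#***   - comment: block telnet
#***     match_type: any
#***     conditions_list:
#***       - match_type: any
#***         classifiers_list:
#***           - tcp_dst: 23
#***     actions:
#***       drop: at_controller
#***       set_desc: telnet-blocked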
#*** Voluptuous schema for qos_treatment branch of main policy:
QOS_TREATMENT_SCHEMA = Schema({str: int})
#*** Voluptuous schema for port_sets branch of main policy:
PORT_SETS_SCHEMA = Schema({
Required('port_set_list'):
[{Extra: object}]
})
#*** Voluptuous schema for a port set node in main policy:
PORT_SET_SCHEMA = Schema({
Required('name'): str,
Required('port_list'):
[
{
'name': str,
'DPID': int,
'ports': validate_ports,
'vlan_id': int
}
]
})
#*** Voluptuous schema for locations branch of main policy:
LOCATIONS_SCHEMA = Schema({
Required('locations_list'):
[{Extra: object}],
Required('default_match'): str
})
#*** Voluptuous schema for a location node in main policy:
LOCATION_SCHEMA = Schema({
Required('name'): str,
Required('port_set_list'):
[{'port_set': str}],
})
#*** Default policy file location parameters:
POL_DIR_DEFAULT = "config"
POL_DIR_USER = "config/user"
POL_FILENAME = "main_policy.yaml"
class Policy(BaseClass):
"""
This policy class serves 4 main purposes:
- Ingest policy (main_policy.yaml) from file
- Validate correctness of policy against schema
- Classify packets against policy, passing through to static,
identity and custom classifiers, as required
- Other methods and functions to check various parameters
against policy
Note: Class definitions are not nested as not considered Pythonic
Main Methods and Variables:
- check_policy(flow, ident) # Check a packet against policy
- qos(qos_treatment) # Map qos_treatment string to queue number
- main_policy # main policy YAML object. Read-only,
no verbs. Use methods instead where
possible.
TC Methods and Variables:
- tc_rules.rules_list # List of TC rules
- tc_rules.custom_classifiers # dedup list of custom classifier names
"""
def __init__(self, config, pol_dir_default=POL_DIR_DEFAULT,
pol_dir_user=POL_DIR_USER,
pol_filename=POL_FILENAME):
""" Initialise the Policy Class """
#*** Required for BaseClass:
self.config = config
#*** Set up Logging with inherited base class method:
self.configure_logging(__name__, "policy_logging_level_s",
"policy_logging_level_c")
self.policy_dir_default = pol_dir_default
self.policy_dir_user = pol_dir_user
self.policy_filename = pol_filename
#*** Get working directory:
self.working_directory = os.path.dirname(__file__)
#*** Build the full path and filename for the user policy file:
self.fullpathname = os.path.join(self.working_directory,
self.policy_dir_user,
self.policy_filename)
if os.path.isfile(self.fullpathname):
self.logger.info("Opening user policy file=%s", self.fullpathname)
else:
self.logger.info("User policy file=%s not found",
self.fullpathname)
self.fullpathname = os.path.join(self.working_directory,
self.policy_dir_default,
self.policy_filename)
self.logger.info("Opening default policy file=%s",
self.fullpathname)
#*** Ingest the policy file:
try:
with open(self.fullpathname, 'r') as filename:
self.main_policy = yaml.safe_load(filename)
except (IOError, OSError) as exception:
self.logger.error("Failed to open policy "
"file=%s exception=%s",
self.fullpathname, exception)
sys.exit("Exiting nmeta. Please create policy file")
#*** Instantiate Classes:
self.static = tc_static.StaticInspect(config, self)
self.identity = tc_identity.IdentityInspect(config)
self.custom = tc_custom.CustomInspect(config)
#*** Check the correctness of the top level of main policy:
validate(self.logger, self.main_policy, TOP_LEVEL_SCHEMA, 'top')
#*** Instantiate classes for the second levels of policy:
self.port_sets = PortSets(self)
self.locations = Locations(self)
self.tc_rules = TCRules(self)
self.qos_treatment = QoSTreatment(self)
#*** Instantiate any custom classifiers:
self.custom.instantiate_classifiers(self.tc_rules.custom_classifiers)
def check_policy(self, flow, ident):
"""
Passed a flows object, set in context of current packet-in event,
and an identities object.
Check if packet matches against any policy
rules and if it does, update the classifications portion of
the flows object to reflect details of the classification.
"""
#*** Check against TC policy:
for tc_rule in self.tc_rules.rules_list:
#*** Check the rule:
tc_rule_result = tc_rule.check_tc_rule(flow, ident)
if tc_rule_result.match:
self.logger.debug("Matched policy rule=%s", tc_rule.__dict__)
#*** Only set 'classified' if continue_to_inspect not set:
if not tc_rule_result.continue_to_inspect:
flow.classification.classified = True
else:
flow.classification.classified = False
flow.classification.classification_tag = \
tc_rule_result.classification_tag
flow.classification.classification_time = \
datetime.datetime.now()
#*** Accumulate any actions:
flow.classification.actions.update(tc_rule_result.actions)
self.logger.debug("flow.classification.actions=%s",
flow.classification.actions)
return 1
#*** No matches. Mark as classified so we don't process again:
flow.classification.classified = True
return 0
def qos(self, qos_treatment):
"""
Passed a QoS treatment string and return the relevant
QoS queue number to use, otherwise 0. Works by lookup
on qos_treatment section of main_policy
"""
qos_policy = self.main_policy['qos_treatment']
if qos_treatment in qos_policy:
return qos_policy[qos_treatment]
elif qos_treatment == 'classifier_return':
#*** This happens:
self.logger.debug("custom classifier did not return "
"qos_treatment")
return 0
else:
self.logger.error("qos_treatment=%s not found in main_policy",
qos_treatment)
return 0
class TCRules(object):
"""
An object that represents the tc_rules root branch of
the main policy
"""
def __init__(self, policy):
""" Initialise the TCRules Class """
#*** Extract logger and policy YAML branch:
self.logger = policy.logger
#*** TBD: fix arbitrary single ruleset...
self.yaml = policy.main_policy['tc_rules']['tc_ruleset_1']
#*** List to be populated with names of any custom classifiers:
self.custom_classifiers = []
#*** Check the correctness of the tc_rules branch of main policy:
validate(self.logger, self.yaml, TC_RULES_SCHEMA, 'tc_rules')
#*** Read in rules:
self.rules_list = []
for idx, key in enumerate(self.yaml):
self.rules_list.append(TCRule(self, policy, idx))
class TCRule(object):
"""
An object that represents a single traffic classification
(TC) rule.
"""
def __init__(self, tc_rules, policy, idx):
"""
Initialise the TCRule Class
Passed a TCRules class instance, a Policy class instance
and an index integer for the index of the tc rule in policy
"""
#*** Extract logger and policy YAML:
self.logger = policy.logger
#*** TBD: fix arbitrary single ruleset...
self.yaml = policy.main_policy['tc_rules']['tc_ruleset_1'][idx]
#*** Check the correctness of the tc rule, including actions:
validate(self.logger, self.yaml, TC_RULE_SCHEMA, 'tc_rule')
validate(self.logger, self.yaml['actions'], TC_ACTIONS_SCHEMA,
'tc_rule_actions')
self.match_type = self.yaml['match_type']
self.actions = self.yaml['actions']
#*** Read in conditions_list:
self.conditions_list = []
for condition in self.yaml['conditions_list']:
self.conditions_list.append(TCCondition(tc_rules,
policy, condition))
def check_tc_rule(self, flow, ident):
"""
Passed Packet and Identity class objects.
Check to see if packet matches conditions as per the
TC rule. Return a TCRuleResult object
"""
#*** Instantiate object to hold results for checks:
result = TCRuleResult(self.actions)
#*** Iterate through the conditions list:
for condition in self.conditions_list:
condition_result = condition.check_tc_condition(flow, ident)
self.logger.debug("condition=%s result=%s", condition.__dict__,
condition_result.__dict__)
#*** Decide what to do based on match result and type:
if condition_result.match and self.match_type == "any":
result.match = True
result.add_rule_actions()
result.accumulate(condition_result)
self.logger.debug("matched_1, result=%s", result.__dict__)
return result
            elif not condition_result.match and self.match_type == "all":
                result.match = False
                self.logger.debug("no_match_1, result=%s", result.__dict__)
                return result
            elif condition_result.match and self.match_type == "all":
                #*** Just accumulate the results:
                result.accumulate(condition_result)
            elif condition_result.match and self.match_type == "none":
result.match = False
self.logger.debug("no_match_2, result=%s", result.__dict__)
return result
else:
#*** Not a condition we take action on so keep going:
pass
#*** We've finished loop through all conditions and haven't
#*** returned. Work out what action to take:
if not condition_result.match and self.match_type == "any":
result.match = False
self.logger.debug("no_match_3, result=%s", result.__dict__)
return result
elif condition_result.match and self.match_type == "all":
result.match = True
result.add_rule_actions()
result.accumulate(condition_result)
self.logger.debug("matched_2, result=%s", result.__dict__)
return result
elif not condition_result.match and self.match_type == "none":
result.match = True
result.add_rule_actions()
self.logger.debug("matched_3, result=%s", result.__dict__)
return result
else:
#*** Unexpected result:
self.logger.error("Unexpected result at "
"end of loop through rule=%s", self.yaml)
result.match = False
return result
class TCRuleResult(object):
"""
An object that represents a traffic classification
result, including any decision collateral
on matches and actions.
Use __dict__ to dump to data to dictionary
"""
def __init__(self, rule_actions):
""" Initialise the TCRuleResult Class """
self.match = 0
self.continue_to_inspect = 0
self.classification_tag = ""
self.actions = {}
#*** Actions defined in policy for this rule:
self.rule_actions = rule_actions
def accumulate(self, condition_result):
"""
Passed a TCConditionResult object and
accumulate values into our object
"""
if condition_result.match:
self.match = True
if condition_result.continue_to_inspect:
self.continue_to_inspect = True
if self.rule_actions['set_desc'] == 'classifier_return':
self.classification_tag = condition_result.classification_tag
else:
self.classification_tag = self.rule_actions['set_desc']
self.actions.update(condition_result.actions)
def add_rule_actions(self):
"""
Add rule actions from policy to the actions of this class
"""
self.actions.update(self.rule_actions)
class TCCondition(object):
"""
An object that represents a single traffic classification
(TC) rule condition from a conditions list
(contains a match type and a list of one or more classifiers)
"""
def __init__(self, tc_rules, policy, policy_snippet):
"""
Initialise the TCCondition Class
Passed a TCRules class instance, a Policy class instance
and a snippet of tc policy for a condition
"""
self.policy = policy
self.logger = policy.logger
self.yaml = policy_snippet
self.classifiers = []
#*** Check the correctness of the tc condition:
validate(self.logger, self.yaml, TC_CONDITION_SCHEMA,
'tc_rule_condition')
for classifier in self.yaml['classifiers_list']:
#*** Validate classifier:
validate(self.logger, classifier, TC_CLASSIFIER_SCHEMA,
'tc_classifier')
#*** Extra validation for location_src:
policy_attr = next(iter(classifier))
policy_value = classifier[policy_attr]
if policy_attr == 'location_src':
validate_location(self.logger, policy_value, policy)
self.classifiers.append(classifier)
#*** Accumulate deduplicated custom classifier names:
if 'custom' in classifier:
custlist = tc_rules.custom_classifiers
if classifier['custom'] not in custlist:
custlist.append(classifier['custom'])
self.match_type = self.yaml['match_type']
def check_tc_condition(self, flow, ident):
"""
Passed a Flow and Identity class objects. Check to see if
flow.packet matches condition (a set of classifiers)
as per the match type.
Return a TCConditionResult object with match information.
"""
pkt = flow.packet
result = TCConditionResult()
self.logger.debug("self.classifiers=%s", self.classifiers)
#*** Iterate through classifiers (example: tcp_src: 123):
for classifier in self.classifiers:
policy_attr = next(iter(classifier))
policy_value = classifier[policy_attr]
#*** Instantiate data structure for classifier result:
classifier_result = TCClassifierResult(policy_attr, policy_value)
self.logger.debug("Iterating classifiers, policy_attr=%s "
"policy_value=%s, policy_attr_type=%s", policy_attr,
policy_value, classifier_result.policy_attr_type)
#*** Main check on classifier attribute type:
if classifier_result.policy_attr_type == "identity":
self.policy.identity.check_identity(classifier_result, pkt,
ident)
elif policy_attr == "custom":
self.policy.custom.check_custom(classifier_result, flow, ident)
self.logger.debug("custom match condition=%s",
classifier_result.__dict__)
else:
#*** default to static classifier:
self.policy.static.check_static(classifier_result, pkt)
self.logger.debug("static match=%s",
classifier_result.__dict__)
#*** Decide what to do based on match result and type:
if classifier_result.match and self.match_type == "any":
result.accumulate(classifier_result)
return result
elif not classifier_result.match and self.match_type == "all":
result.match = False
return result
elif classifier_result.match and self.match_type == "none":
result.match = False
return result
else:
#*** Not a condition we take action on, keep going:
pass
#*** Finished loop through all conditions without return.
#*** Work out what action to take:
if not classifier_result.match and self.match_type == "any":
result.match = False
return result
elif classifier_result.match and self.match_type == "all":
result.accumulate(classifier_result)
return result
elif not classifier_result.match and self.match_type == "none":
result.match = True
return result
else:
#*** Unexpected result:
self.logger.error("Unexpected result at end of loop"
"classifier_result=%s",
classifier_result.__dict__)
result.match = False
return result
class TCConditionResult(object):
"""
An object that represents a traffic classification condition
result. Custom classifiers can return additional parameters
beyond a Boolean match, so cater for these too.
Use __dict__ to dump to data to dictionary
"""
def __init__(self):
""" Initialise the TCConditionResult Class """
self.match = False
self.continue_to_inspect = False
self.classification_tag = ""
self.actions = {}
def accumulate(self, classifier_result):
"""
Passed a TCClassifierResult object and
accumulate values into our object
"""
if classifier_result.match:
self.match = True
if classifier_result.continue_to_inspect:
self.continue_to_inspect = True
self.actions.update(classifier_result.actions)
self.classification_tag += classifier_result.classification_tag
class TCClassifierResult(object):
"""
An object that represents a traffic classification classifier
result. Custom classifiers can return additional parameters
beyond a Boolean match, so cater for these too.
Use __dict__ to dump to data to dictionary
"""
def __init__(self, policy_attr, policy_value):
""" Initialise the TCClassifierResult Class """
self.match = False
self.continue_to_inspect = 0
self.policy_attr = policy_attr
#*** Policy Attribute Type is for identity classifiers
self.policy_attr_type = policy_attr.split("_")[0]
self.policy_value = policy_value
self.classification_tag = ""
self.actions = {}
class QoSTreatment(object):
"""
An object that represents the qos_treatment root branch of
the main policy
"""
def __init__(self, policy):
""" Initialise the QoSTreatment Class """
#*** Extract logger and policy YAML branch:
self.logger = policy.logger
self.yaml = policy.main_policy['qos_treatment']
#*** Check the correctness of the qos_treatment branch of main policy:
validate(self.logger, self.yaml, QOS_TREATMENT_SCHEMA, 'qos_treatment')
class PortSets(object):
"""
An object that represents the port_sets root branch of
the main policy
"""
def __init__(self, policy):
""" Initialise the PortSets Class """
#*** Extract logger and policy YAML branch:
self.logger = policy.logger
self.yaml = policy.main_policy['port_sets']
#*** Check the correctness of the port_sets branch of main policy:
validate(self.logger, self.yaml, PORT_SETS_SCHEMA, 'port_sets')
#*** Read in port_sets:
self.port_sets_list = []
for idx, key in enumerate(self.yaml['port_set_list']):
self.port_sets_list.append(PortSet(policy, idx))
def get_port_set(self, dpid, port, vlan_id=0):
"""
Check if supplied dpid/port/vlan_id is member of
a port set and if so, return the port_set name. If no
match return empty string.
"""
for idx in self.port_sets_list:
if idx.is_member(dpid, port, vlan_id):
return idx.name
return ""
class PortSet(object):
"""
An object that represents a single port set
"""
def __init__(self, policy, idx):
""" Initialise the PortSet Class """
#*** Extract logger and policy YAML:
self.logger = policy.logger
self.yaml = \
policy.main_policy['port_sets']['port_set_list'][idx]
self.name = self.yaml['name']
#*** Check the correctness of the location policy:
validate(self.logger, self.yaml, PORT_SET_SCHEMA, 'port_set')
#*** Build searchable lists of ports
#*** (ranges turned into multiple single values):
port_list = self.yaml['port_list']
for ports in port_list:
ports['ports_xform'] = transform_ports(ports['ports'])
def is_member(self, dpid, port, vlan_id=0):
"""
Check to see supplied dpid/port/vlan_id is member of
this port set. Returns a Boolean
"""
#*** Validate dpid is an integer (and coerce if required):
msg = 'dpid must be integer'
dpid = validate_type(int, dpid, msg)
#*** Validate port is an integer (and coerce if required):
msg = 'Port must be integer'
port = validate_type(int, port, msg)
#*** Validate vlan_id is an integer (and coerce if required):
msg = 'vlan_id must be integer'
vlan_id = validate_type(int, vlan_id, msg)
#*** Iterate through port list looking for a match:
port_list = self.yaml['port_list']
for ports in port_list:
if not ports['DPID'] == dpid:
self.logger.debug("did not match dpid")
continue
if not ports['vlan_id'] == vlan_id:
self.logger.debug("did not match vlan_id")
continue
if port in ports['ports_xform']:
return True
self.logger.debug("no match, returning False")
return False
class Locations(object):
"""
An object that represents the locations root branch of
the main policy
"""
def __init__(self, policy):
""" Initialise the Locations Class """
#*** Extract logger and policy YAML branch:
self.logger = policy.logger
self.yaml = policy.main_policy['locations']
#*** Check the correctness of the locations branch of main policy:
validate(self.logger, self.yaml, LOCATIONS_SCHEMA, 'locations')
#*** Read in locations etc:
self.locations_list = []
for idx, key in enumerate(self.yaml['locations_list']):
self.locations_list.append(Location(policy, idx))
#*** Default location to use if no match:
self.default_match = self.yaml['default_match']
def get_location(self, dpid, port):
"""
Passed a DPID and port and return a logical location
name, as per policy configuration.
"""
result = ""
for location in self.locations_list:
result = location.check(dpid, port)
if result:
return result
return self.default_match
class Location(object):
"""
An object that represents a single location
"""
def __init__(self, policy, idx):
""" Initialise the Location Class """
#*** Extract logger and policy YAML:
self.logger = policy.logger
self.policy = policy
self.yaml = \
policy.main_policy['locations']['locations_list'][idx]
#*** Check the correctness of the location policy:
validate(self.logger, self.yaml, LOCATION_SCHEMA, 'location')
#*** Check that port sets exist:
validate_port_set_list(self.logger, self.yaml['port_set_list'],
policy)
#*** Store data from YAML into this class:
self.name = self.yaml['name']
self.port_set_list = self.yaml['port_set_list']
def check(self, dpid, port):
"""
Check a dpid/port to see if it is part of this location
and if so return the string name of the location otherwise
return empty string
"""
port_set_membership = \
self.policy.port_sets.get_port_set(dpid, port)
for port_set in self.port_set_list:
if port_set['port_set'] == port_set_membership:
return self.name
return ""
| {
"content_hash": "a297d6e9d368d77db5d5ae5912e51880",
"timestamp": "",
"source": "github",
"line_count": 956,
"max_line_length": 94,
"avg_line_length": 39.99686192468619,
"alnum_prop": 0.5587258414624579,
"repo_name": "mattjhayes/nmeta",
"id": "316d4c12cccc7e9304263734263d1e1546143b61",
"size": "38856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nmeta/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5608"
},
{
"name": "HTML",
"bytes": "15623"
},
{
"name": "JavaScript",
"bytes": "97890"
},
{
"name": "Python",
"bytes": "519273"
}
],
"symlink_target": ""
} |
"""ML-ENSEMBLE
Test base functions used by sublearners
"""
import os
import numpy as np
from mlens.parallel._base_functions import slice_array, assign_predictions
# TODO: Write tests
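# A minimal placeholder sketch until real tests exist (assumption: only that
# the names imported above resolve and are callable; their exact signatures
# are not exercised here, so this is not a behavioural test):
def test_base_functions_importable():
    """Smoke test: the helpers under test can at least be imported."""
    assert callable(slice_array)
    assert callable(assign_predictions)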
| {
"content_hash": "1f01dbee90c5713ab157f0c37f833186",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.7741935483870968,
"repo_name": "flennerhag/mlens",
"id": "569af0a9ffb7ac3f9ae00449423f3ca2669721f5",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlens/parallel/tests/test_a_base_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "933041"
},
{
"name": "Shell",
"bytes": "238"
}
],
"symlink_target": ""
} |
import unittest
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.testsdk import (
ScenarioTest,
record_only
)
from azure.cli.command_modules.serviceconnector._resource_config import (
RESOURCE,
SOURCE_RESOURCES,
TARGET_RESOURCES
)
from ._test_utils import CredentialReplacer
@unittest.skip('Need containerapp extension installed')
class ContainerAppConnectionScenarioTest(ScenarioTest):
default_container_name = 'simple-hello-world-container'
def __init__(self, method_name):
super(ContainerAppConnectionScenarioTest, self).__init__(
method_name,
recording_processors=[CredentialReplacer()]
)
# @record_only()
def test_containerapp_mysql_e2e(self):
self.kwargs.update({
'subscription': get_subscription_id(self.cli_ctx),
'source_resource_group': 'servicelinker-test-linux-group',
'target_resource_group': 'servicelinker-test-linux-group',
'app': 'servicelinker-mysql-aca',
'server': 'servicelinker-mysql',
'database': 'mysqlDB'
})
# prepare password
user = 'servicelinker'
password = self.cmd('keyvault secret show --vault-name cupertino-kv-test -n TestDbPassword')\
.get_output_in_json().get('value')
# prepare params
name = 'testconn'
source_id = SOURCE_RESOURCES.get(RESOURCE.ContainerApp).format(**self.kwargs)
target_id = TARGET_RESOURCES.get(RESOURCE.Mysql).format(**self.kwargs)
# create connection, test clientType=None
self.cmd('containerapp connection create mysql --connection {} --source-id {} --target-id {} '
'--secret name={} secret={} --client-type none -c {}'.format(name, source_id, target_id, user, password, self.default_container_name))
# list connection
connections = self.cmd(
'containerapp connection list --source-id {}'.format(source_id),
checks = [
self.check('length(@)', 1),
self.check('[0].authInfo.authType', 'secret'),
self.check('[0].clientType', 'none')
]
).get_output_in_json()
connection_id = connections[0].get('id')
# update connection
self.cmd('containerapp connection update mysql --id {} --client-type dotnet '
'--secret name={} secret={}'.format(connection_id, user, password),
checks = [ self.check('clientType', 'dotnet') ])
# list configuration
self.cmd('containerapp connection list-configuration --id {}'.format(connection_id))
# validate connection
self.cmd('containerapp connection validate --id {}'.format(connection_id))
# show connection
self.cmd('containerapp connection show --id {}'.format(connection_id))
# delete connection
self.cmd('containerapp connection delete --id {} --yes'.format(connection_id))
def test_containerapp_mysql_e2e_kvsecret(self):
self.kwargs.update({
'subscription': get_subscription_id(self.cli_ctx),
'source_resource_group': 'servicelinker-test-linux-group',
'target_resource_group': 'servicelinker-test-linux-group',
'app': 'servicelinker-mysql-aca',
'server': 'servicelinker-mysql',
'database': 'mysqlDB'
})
# prepare password
user = 'servicelinker'
keyvaultUri = "https://cupertino-kv-test.vault.azure.net/secrets/TestDbPassword"
# prepare params
name = 'testconn'
source_id = SOURCE_RESOURCES.get(RESOURCE.ContainerApp).format(**self.kwargs)
target_id = TARGET_RESOURCES.get(RESOURCE.Mysql).format(**self.kwargs)
# create connection, test clientType=None
self.cmd('containerapp connection create mysql --connection {} --source-id {} --target-id {} '
'--secret name={} secret-uri={} --client-type none -c {}'.format(name, source_id, target_id, user, keyvaultUri, self.default_container_name))
# list connection
connections = self.cmd(
'containerapp connection list --source-id {}'.format(source_id),
checks = [
self.check('length(@)', 1),
self.check('[0].authInfo.authType', 'secret'),
self.check('[0].clientType', 'none')
]
).get_output_in_json()
connection_id = connections[0].get('id')
# delete connection
self.cmd('containerapp connection delete --id {} --yes'.format(connection_id))
# @record_only()
def test_containerapp_storageblob_e2e(self):
self.kwargs.update({
'subscription': get_subscription_id(self.cli_ctx),
'source_resource_group': 'servicelinker-test-linux-group',
'target_resource_group': 'servicelinker-test-linux-group',
'app': 'servicelinker-storage-aca',
'account': 'servicelinkerstorage'
})
# prepare params
name = 'testconn'
source_id = SOURCE_RESOURCES.get(RESOURCE.ContainerApp).format(**self.kwargs)
target_id = TARGET_RESOURCES.get(RESOURCE.StorageBlob).format(**self.kwargs)
# create connection
self.cmd('containerapp connection create storage-blob --connection {} --source-id {} --target-id {} '
'--system-identity --client-type python -c {}'.format(name, source_id, target_id, self.default_container_name))
# list connection
connections = self.cmd(
'containerapp connection list --source-id {}'.format(source_id),
checks = [
self.check('length(@)', 1),
self.check('[0].authInfo.authType', 'systemAssignedIdentity'),
self.check('[0].clientType', 'python')
]
).get_output_in_json()
connection_id = connections[0].get('id')
# update connection
self.cmd('containerapp connection update storage-blob --id {} --client-type dotnet'.format(connection_id),
checks = [ self.check('clientType', 'dotnet') ])
# list configuration
self.cmd('containerapp connection list-configuration --id {}'.format(connection_id))
# validate connection
self.cmd('containerapp connection validate --id {}'.format(connection_id))
# show connection
self.cmd('containerapp connection show --id {}'.format(connection_id))
# delete connection
self.cmd('containerapp connection delete --id {} --yes'.format(connection_id))
| {
"content_hash": "a4c6bf76e0c0d6ff2266562c4ea6e759",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 158,
"avg_line_length": 40.98148148148148,
"alnum_prop": 0.610784756740473,
"repo_name": "yugangw-msft/azure-cli",
"id": "db45584544f837f154d8189c16333b6947afe15d",
"size": "6951",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/serviceconnector/tests/latest/test_containerapp_connection_scenario.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
"""Work with combinations of relationships, like *part_of* and *regulates*"""
from __future__ import print_function
__copyright__ = "Copyright (C) 2017-present, DV Klopfenstein, H Tang et al. All rights reserved."
from goatools.godag.consts import RELATIONSHIP_SET
class RelationshipCombos:
"""Work with combinations of relationships, like *part_of* and *regulates*"""
def __init__(self, godag):
self.godag_rels_loaded = hasattr(next(iter(godag.values())), 'relationship')
self.relationship_set = RELATIONSHIP_SET
self.go2obj = {go:o for go, o in godag.items() if go == o.id}
self.dag_rels = self._init_dag_relationships()
def get_relationship_combos(self):
"""Get all combinations of all lengths of relationship lists"""
rels_combo = []
num_rels = len(self.dag_rels)
dag_rels = sorted(self.dag_rels)
print('GODAG relationships[{N}]: {Rs}'.format(N=num_rels, Rs=dag_rels))
for cnt in range(2**num_rels):
idxs = [i for i, v in enumerate('{N:0{L}b}'.format(N=cnt, L=num_rels)) if v == '1']
if idxs:
rels_cur = set(dag_rels[i] for i in idxs)
rels_combo.append(rels_cur)
# print('{N:0{L}b}'.format(N=cnt, L=num_rels), idxs, rels_cur)
return rels_combo
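    # Worked example (illustrative values): with dag_rels == ['part_of', 'regulates'],
    # cnt runs 0..3 and its binary form selects members: '01' -> {'regulates'},
    # '10' -> {'part_of'}, '11' -> {'part_of', 'regulates'}; the all-zero case
    # is skipped, giving 2**2 - 1 == 3 non-empty combinations.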
def chk_relationships_all(self):
"""Check that the list of relationships in consts is same as found in GODAG"""
assert set(self.dag_rels) == self.relationship_set, \
set(self.dag_rels).symmetric_difference(self.relationship_set)
def get_set(self, relationships_arg):
"""Return a set of relationships found in all subset GO Terms."""
if relationships_arg:
if self.godag_rels_loaded:
relationships_dag = self.dag_rels
if relationships_arg is True:
return relationships_dag
relationshipset_usr = self._get_set_rel(relationships_arg)
if relationshipset_usr:
self._chk_expected(relationshipset_usr, relationships_dag)
return relationships_dag.intersection(relationshipset_usr)
print('**WARNING: UNKNOWN GODag relationships({R}). EXPECTED VALUES: {Rs}'.format(
R=relationships_arg, Rs=' '.join(sorted(relationships_dag))))
else:
err = ("""**WARNING: IGNORING(relationships={R}); NO GODag RELATIONSHIPS LOADED """
"""W/ARG, optional_attrs=['relationship']""")
print(err.format(R=relationships_arg))
return set()
@staticmethod
def _get_set_rel(relationships):
"""Return a set containing with prospective relationships"""
if isinstance(relationships, set):
return relationships
if isinstance(relationships, list):
return set(relationships)
if isinstance(relationships, str):
return set([relationships,])
return None
@staticmethod
def _chk_expected(relationships_usr, relationships_dag):
"""Check that user relationships were found"""
rels_unexpected = relationships_usr.difference(relationships_dag)
if not rels_unexpected:
return
print('**NOTE: RELATIONSHIPS IN GODag: SEEN({Rs}) NOT_SEEN({R})'.format(
R=' '.join(sorted(rels_unexpected)), Rs=' '.join(sorted(relationships_dag))))
def _init_dag_relationships(self):
"""Return all relationships seen in GO Dag subset."""
relationship_set = set()
if not self.godag_rels_loaded:
return relationship_set
for goterm in self.go2obj.values():
if goterm.relationship:
relationship_set.update(goterm.relationship)
if goterm.relationship_rev:
relationship_set.update(goterm.relationship_rev)
return relationship_set
# Copyright (C) 2017-present, DV Klopfenstein, H Tang et al. All rights reserved.
| {
"content_hash": "a61048674b6ee4fd058ebfe38531df44",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 99,
"avg_line_length": 45.19101123595506,
"alnum_prop": 0.6133764296369966,
"repo_name": "tanghaibao/goatools",
"id": "a844e908587982e9d1c89f0f0a1c81d495d7130b",
"size": "4022",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "goatools/godag/relationship_combos.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "316670"
},
{
"name": "Makefile",
"bytes": "25213"
},
{
"name": "Python",
"bytes": "146769147"
},
{
"name": "Shell",
"bytes": "1107"
}
],
"symlink_target": ""
} |
import numpy as np
# C modules
try:
from chronostar._overall_likelihood import get_overall_lnlikelihood_for_fixed_memb_probs
except ImportError:
print("C IMPLEMENTATION OF expectation NOT IMPORTED")
USE_C_IMPLEMENTATION = False
TODO = True # NOW WHAT?
# Read in stellar data
from chronostar import tabletool
from chronostar.run_em_files_python import expectation_marusa as expectationP
import time
####################################################################
#### DATA ##########################################################
####################################################################
import pickle
with open('data_for_testing/input_data_to_get_overall_lnlikelihood_for_fixed_memb_probs.pkl', 'rb') as f:
d = pickle.load(f)
st_mns = d[0]
st_covs = d[1]
gr_mns = d[2]
gr_covs = d[3]
bg_lnols = d[4]
memb_probs_new = d[5]
data_dict = d[6]
comps_new = d[7]
memb_probs_new = d[8] # same as d[5]
inc_posterior = d[9]
use_box_background = d[10]
####################################################################
#### Python ########################################################
####################################################################
#~ comps_new_list = [[comp.get_mean(), comp.get_covmatrix()] for comp in comps_new]
comps_new_list = [[comp.get_mean_now(), comp.get_covmatrix_now()] for comp in comps_new]
time_start = time.time()
overall_lnlikeP = expectationP.get_overall_lnlikelihood_for_fixed_memb_probs(
data_dict, comps_new_list, memb_probs=memb_probs_new,
inc_posterior=inc_posterior, use_box_background=use_box_background)
#~ overall_lnlikeP = expectationP.get_overall_lnlikelihood(
#~ data_dict, comps_new_list, old_memb_probs=memb_probs_new,
#~ inc_posterior=inc_posterior, use_box_background=use_box_background)
print('overall_lnlikeP', overall_lnlikeP)
duration_P = time.time()-time_start
print('Duration python:', duration_P)
####################################################################
#### C #############################################################
####################################################################
time_start = time.time()
overall_lnlikeC = get_overall_lnlikelihood_for_fixed_memb_probs(
st_mns, st_covs, gr_mns, gr_covs, bg_lnols, memb_probs_new)
#~ overall_lnlikeC = overall_lnlikeC[0] # TODO
print('overall_lnlikeC', overall_lnlikeC)
duration_C = time.time()-time_start
print('Duration C:', duration_C)
####################################################################
#### COMPARE TIME ##################################################
####################################################################
print('Duration_python / Duration_C', duration_P/duration_C)
####################################################################
#### COMPARE RESULTS ###############################################
####################################################################
diff = np.abs(overall_lnlikeC-overall_lnlikeP)
print('DIFF', diff)
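#~ Optional strict check (illustrative; the 1e-8 tolerance is an assumption):
#~ np.testing.assert_allclose(overall_lnlikeC, overall_lnlikeP, atol=1e-8)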
print("TESTS FINISHED. \n")
| {
"content_hash": "57e72bac192899e7e870cba64cc0be64",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 105,
"avg_line_length": 35.94047619047619,
"alnum_prop": 0.48526001987413053,
"repo_name": "mikeireland/chronostar",
"id": "c98d8f4a8207f34fe063f8469e57f30abcb98c66",
"size": "3019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fastfit/test_get_overall_likelihood.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "308199"
},
{
"name": "C++",
"bytes": "2106"
},
{
"name": "Makefile",
"bytes": "1032"
},
{
"name": "Python",
"bytes": "1116075"
},
{
"name": "SWIG",
"bytes": "4608"
},
{
"name": "Shell",
"bytes": "1163"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cavstudio_backend.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "42a701b539d8367c5fe96119f99622df",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 81,
"avg_line_length": 37.76190476190476,
"alnum_prop": 0.6242118537200504,
"repo_name": "google-research/mood-board-search",
"id": "264ad823076f1e62e9fe6e411c946eccb9663e96",
"size": "1391",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "backend/manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1399"
},
{
"name": "HTML",
"bytes": "2098"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "Jupyter Notebook",
"bytes": "175456"
},
{
"name": "Makefile",
"bytes": "864"
},
{
"name": "Python",
"bytes": "104536"
},
{
"name": "SCSS",
"bytes": "5389"
},
{
"name": "TypeScript",
"bytes": "62924"
},
{
"name": "Vue",
"bytes": "146704"
}
],
"symlink_target": ""
} |
from machina.core.db.models import get_model
from machina.test.mixins import AdminBaseViewTestMixin
from machina.test.testcases import AdminClientTestCase
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
class TestPostAdmin(AdminClientTestCase, AdminBaseViewTestMixin):
model = Post
class TestTopicAdmin(AdminClientTestCase, AdminBaseViewTestMixin):
model = Topic
| {
"content_hash": "421de60d985b2b0ffdcdd73afd840684",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.8023529411764706,
"repo_name": "franga2000/django-machina",
"id": "4828931a0866577a629cc063ca8a1680813256ad",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/admin/test_conversation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13665"
},
{
"name": "HTML",
"bytes": "138474"
},
{
"name": "JavaScript",
"bytes": "5866"
},
{
"name": "Makefile",
"bytes": "1599"
},
{
"name": "Python",
"bytes": "696565"
}
],
"symlink_target": ""
} |
""" OTAA Node example compatible with the LoPy Nano Gateway """
from network import LoRa
from network import WLAN
import binascii
import pycom
import socket
import struct
import time
import config as c
# Disable WiFi if configured; otherwise leave the default SSID up
if c.DIS_WIFI:
wlan = WLAN()
wlan.deinit()
# Set heartbeat
pycom.heartbeat(c.HEARTBEAT)
# Initialize LoRa in LORAWAN mode.
lora = LoRa(mode=LoRa.LORAWAN)
# create an OTA authentication params
dev_eui = binascii.unhexlify(c.DEV_EUI.replace(' ',''))
app_eui = binascii.unhexlify(c.APP_EUI.replace(' ',''))
app_key = binascii.unhexlify(c.APP_KEY.replace(' ',''))
# set the 3 default channels to the same frequency (must be before sending the OTAA join request)
lora.add_channel(0, frequency=c.LORA_FREQUENCY, dr_min=0, dr_max=5)
lora.add_channel(1, frequency=c.LORA_FREQUENCY, dr_min=0, dr_max=5)
lora.add_channel(2, frequency=c.LORA_FREQUENCY, dr_min=0, dr_max=5)
# join a network using OTAA
lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)
# wait until the module has joined the network
while not lora.has_joined():
if c.DEBUG_CON:
print('Not joined yet...')
if c.DEBUG_LED:
pycom.rgbled(c.RED)
time.sleep(0.1)
pycom.rgbled(c.LED_OFF)
time.sleep(2)
# remove all the non-default channels
for i in range(3, 16):
lora.remove_channel(i)
# create a LoRa socket
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
# set the LoRaWAN data rate
s.setsockopt(socket.SOL_LORA, socket.SO_DR, c.LORA_DR)
# make the socket blocking
s.setblocking(False)
time.sleep(5.0)
i = 0
while True:
# Prepare the packet
pkt = c.PKT_PREFIX + bytes([i % 256])
# Send it
s.send(pkt)
if c.DEBUG_CON:
print('send >> ', pkt)
if c.DEBUG_LED:
pycom.rgbled(c.GREEN)
time.sleep(0.1)
pycom.rgbled(c.LED_OFF)
# Wait to receive packet
time.sleep(4)
rx = s.recv(256)
if rx and c.DEBUG_CON:
pycom.rgbled(c.BLUE)
time.sleep(0.1)
pycom.rgbled(c.LED_OFF)
print("receive << ", rx)
# Sleep
time.sleep(c.SLEEP_MAIN)
i += 1
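# Note (illustrative): if c.PKT_PREFIX were b'\x01\x02', iteration 0 would send
# b'\x01\x02\x00'; the trailing counter byte wraps every 256 packets via `i % 256`.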
| {
"content_hash": "a518f401c4cd3eb537d15612a58eeb17",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 97,
"avg_line_length": 25.345238095238095,
"alnum_prop": 0.6632221700328793,
"repo_name": "FablabLannion/LoPy",
"id": "cb656c1d011e4b367d88117005cd30bf16816d57",
"size": "2129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TTN/node/otaa_node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22662"
},
{
"name": "Shell",
"bytes": "524"
}
],
"symlink_target": ""
} |
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='A terminal based typing tutor')
    parser.add_argument('file', metavar='file', type=str, help='the file you want to use for typing')
    parser.add_argument('-s', '--silent', action="store_true", default=False, help='suppress the stats shown at the end')
return parser.parse_args()
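# Usage sketch (hypothetical invocation; the CLI wiring lives elsewhere in t3):
# args = parse_args()          # e.g. invoked as: t3 words.txt --silent
# args.file   -> 'words.txt'
# args.silent -> True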
| {
"content_hash": "9fbe41f0fcd576520a2d0db0763792c4",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 103,
"avg_line_length": 39.111111111111114,
"alnum_prop": 0.7159090909090909,
"repo_name": "girishramnani/t3",
"id": "247279a32be32e47ec0fcd3faba4cb84ba779e6e",
"size": "352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "t3/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "969"
},
{
"name": "Python",
"bytes": "8961"
}
],
"symlink_target": ""
} |
from django.db import models
class Event(models.Model):
dt = models.DateTimeField()
class MaybeEvent(models.Model):
dt = models.DateTimeField(blank=True, null=True)
class Timestamp(models.Model):
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
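# Note: auto_now_add stamps `created` once, when the row is first inserted;
# auto_now refreshes `updated` on every save(). Neither fires on
# queryset.update(), which bypasses save().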
| {
"content_hash": "d984b5c486c2de1246472baeb94db1ce",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 28.272727272727273,
"alnum_prop": 0.7459807073954984,
"repo_name": "mixman/djangodev",
"id": "9296edf92494d9f339cd01aa89298ffc88804a7c",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/modeltests/timezones/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "88362"
},
{
"name": "Python",
"bytes": "7834206"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
} |
# Copyright Jacob Bennett 4/22/16
# v1.0
# Status: Stable
import requests, lxml.html, json, re
# Unique user-agent for crawling
headers = {'user-agent': 'Simple Scraper'}
class scrape_link:
def __init__(self, url):
        # Request and parse the HTML of the link
try:
t = requests.get(url, headers=headers)
t = lxml.html.fromstring(t.text)
except Exception:
t = ''
linksplit = url.split('/') # Split link up into sections
try:
# Tries for meta title first
self.title = t.xpath('//meta[@property="og:title"]/@content')[0]
except Exception:
try:
# Otherwise gets it from title tag
self.title = t.xpath('//title/text()')[0]
except Exception:
self.title = 'None'
try:
            # Checks for meta image and retrieves its URL
self.image = t.xpath('//meta[@property="og:image"]/@content')[0].split('//')[0]
# Makes sure it's not just getting blank or protocol
if self.image == '' or self.image == 'http:' or self.image == 'https:':
self.image = t.xpath('//meta[@property="og:image"]/@content')[0].split('//')[1]
try:
self.image = self.image + '//' + t.xpath('//meta[@property="og:image"]/@content')[0].split('//')[2]
except IndexError:
pass
self.image = re.sub(r':/', r'://', self.image)
except Exception:
self.image = 'None'
try:
self.description = t.xpath('//meta[@property="og:description"]/@content')[0]
except Exception:
self.description = 'None'
# Check if protocol is http or https
if url.startswith('http://'):
proto = 'http://'
elif url.startswith('https://'):
proto = 'https://'
else:
proto = ''
try:
# Initial check for favicon URL
self.favicon = t.xpath('//link[@rel="icon" or @rel="shortcut icon" or @rel="icon shortcut"]/@href')[0].split('//')[0]
# Make sure it isn't just getting a protocol
if self.favicon == '' or self.favicon == 'http:' or self.favicon == 'https:':
# If it is, get the real URL
self.favicon = t.xpath('//link[@rel="icon" or @rel="shortcut icon" or @rel="icon shortcut"]/@href')[0].split('//')[1]
try:
# If it found it, check if it is a live page
a = requests.get('http://' + self.favicon, headers=headers)
if a.status_code != 200:
raise Exception
except Exception:
try:
# If the request fails, try a different approach
# See if URL is on same domain
a = requests.get('http://' + linksplit[2] + '/' + self.favicon, headers=headers)
if a.status_code != 200:
self.favicon = 'None'
else:
self.favicon = linksplit[2] + '/' + self.favicon
except Exception:
self.favicon = 'None'
except Exception:
# If it's not found in HTML, check the general "domain.com/favicon.ico"
try:
a = requests.get('http://' + linksplit[2] + '/favicon.ico', headers=headers)
if a.status_code != 200:
self.favicon = 'None'
else:
# If the request is successful, it's probably the favicon
self.favicon = linksplit[2] + '/favicon.ico'
except Exception:
self.favicon = 'None'
# Return favicon with protocol
if self.favicon != 'None':
self.favicon = proto + self.favicon
# Check for valid linkid
def check_link(link):
if link.startswith('http://') or link.startswith('https://'):
if '.' in link and '<' not in link and '>' not in link and ';' not in link:
return True
else:
return False
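# Usage sketch (example.com is a placeholder URL):
# if check_link('http://example.com'):
#     link = scrape_link('http://example.com')
#     print(link.title, link.description, link.image, link.favicon)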
| {
"content_hash": "579ba39c73e673ec73f48509b86ec3c2",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 133,
"avg_line_length": 40.95192307692308,
"alnum_prop": 0.48743836581357125,
"repo_name": "jac0bbennett/simplescrape",
"id": "5e31e4143c5666361ce87e65069856758cecf54d",
"size": "4282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplescrape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4282"
}
],
"symlink_target": ""
} |
""" Test Templates """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import qisrc.templates
def test_process_templates(tmpdir):
""" Test Process Templates """
tmpl = tmpdir.mkdir("tmpl")
tmpl_cmake_list = tmpl.ensure("CMakeLists.txt", file=True)
tmpl_cmake_list.write("""\
cmake_minimum_required(VERSION 3.0)
project(@ProjectName@)
add_executable(@project_name@ "@project_name@/@project_name@.cpp")
""")
tmpl_hpp = tmpl.ensure("@project_name@", "@project_name@.hpp", file=True)
tmpl_hpp.write("""\
#ifndef @PROJECT_NAME@_HPP
#define @PROJECT_NAME@_HPP
class @ProjectName@ {
public:
void @projectName@Register() {
// Your code goes here
}
};
#endif
""")
dest = tmpdir.mkdir("dest")
qisrc.templates.process(tmpl.strpath, dest.strpath, project_name="monthyPython")
dest_cmake = dest.join("CMakeLists.txt")
assert dest_cmake.read() == """\
cmake_minimum_required(VERSION 3.0)
project(MonthyPython)
add_executable(monthy_python "monthy_python/monthy_python.cpp")
"""
dest_hpp = dest.join("monthy_python", "monthy_python.hpp")
assert dest_hpp.read() == """\
#ifndef MONTHY_PYTHON_HPP
#define MONTHY_PYTHON_HPP
class MonthyPython {
public:
void monthyPythonRegister() {
// Your code goes here
}
};
#endif
"""
def test_process_string():
""" Test Process String """
res = qisrc.templates.process_string("@project_name@.cpp",
project_name="monthy_python")
assert res == "monthy_python.cpp"
res = qisrc.templates.process_string("#define @PROJECT_NAME@_HPP",
project_name="Foo")
assert res == "#define FOO_HPP"
res = qisrc.templates.process_string("#define @PROJECT_NAME@_HPP",
project_name="MonthyPython")
assert res == "#define MONTHY_PYTHON_HPP"
| {
"content_hash": "6512a544390cbca9e1a1d07b98e2cc8d",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 84,
"avg_line_length": 28.676470588235293,
"alnum_prop": 0.6338461538461538,
"repo_name": "aldebaran/qibuild",
"id": "ffaa89af7738905ddc955475098165446e57a18f",
"size": "2147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qisrc/test/test_templates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6892"
},
{
"name": "C++",
"bytes": "23130"
},
{
"name": "CMake",
"bytes": "292637"
},
{
"name": "Makefile",
"bytes": "755"
},
{
"name": "Nix",
"bytes": "563"
},
{
"name": "Python",
"bytes": "1581825"
},
{
"name": "SWIG",
"bytes": "306"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
} |
"""
WSGI config for friendlyphotos project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "friendlyphotos.settings")
# Original wsgi.py settings
# from django.core.wsgi import get_wsgi_application
# application = get_wsgi_application()
################################
### HEROKU-SPECIFIC SETTINGS ###
################################
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
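# Note: DjangoWhiteNoise is the pre-4.0 WhiteNoise API; WhiteNoise 4.x removed it
# in favour of `whitenoise.WhiteNoise` (or Django's MIDDLEWARE setting), so the
# import above assumes an older whitenoise release is pinned.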
| {
"content_hash": "4e2fc78cf296db1dd8220997d9c51c3c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 27.53846153846154,
"alnum_prop": 0.7122905027932961,
"repo_name": "friendlydjango/friendly-photos",
"id": "9e031dcbbf22c7769655418ce24c06296dc993e7",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "friendlyphotos/friendlyphotos/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "658"
},
{
"name": "HTML",
"bytes": "5606"
},
{
"name": "Python",
"bytes": "8664"
}
],
"symlink_target": ""
} |
import pytest
from hypothesis import given, settings, HealthCheck
from hypothesis import reproduce_failure # pylint: disable=unused-import
from itertools import product
import tempfile
import subprocess # nosec
from io import StringIO
import pandas as pd
import numpy as np
from tests.helpers import assert_df_equal
from tests.hypothesis_helper import dfs_min2, dfs_min
from tests.hypothesis_helper import max_examples, deadline
strandedness = [False, "same", "opposite"]
no_opposite = [False, "same"]
def run_bedtools(command,
gr,
gr2,
strandedness,
nearest_overlap=False,
nearest_how=None,
ties=""):
bedtools_strand = {False: "", "same": "-s", "opposite": "-S"}[strandedness]
bedtools_overlap = {True: "", False: "-io"}[nearest_overlap]
bedtools_how = {
"upstream": "-id",
"downstream": "-iu",
None: ""
}[nearest_how] + " -D a"
# print("bedtools how:", bedtools_how)
ties = "-t " + ties if ties else ""
with tempfile.TemporaryDirectory() as temp_dir:
f1 = "{}/f1.bed".format(temp_dir)
f2 = "{}/f2.bed".format(temp_dir)
gr.df.to_csv(f1, sep="\t", header=False, index=False)
gr2.df.to_csv(f2, sep="\t", header=False, index=False)
cmd = command.format(
f1=f1,
f2=f2,
strand=bedtools_strand,
overlap=bedtools_overlap,
bedtools_how=bedtools_how,
ties=ties)
print("cmd " * 5)
print(cmd)
# ignoring the below line in bandit as only strings created by
# the test suite is run here; no user input ever sought
result = subprocess.check_output( # nosec
cmd, shell=True, executable="/bin/bash").decode() #nosec
return result
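# Example of a rendered command (illustrative; the temp-dir paths are placeholders):
#   bedtools intersect -u -s -a /tmp/tmpXYZ/f1.bed -b /tmp/tmpXYZ/f2.bed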
def read_bedtools_result_set_op(bedtools_result, strandedness):
if strandedness:
usecols = [0, 1, 2, 5]
names = "Chromosome Start End Strand".split()
else:
usecols = [0, 1, 2]
names = "Chromosome Start End".split()
return pd.read_csv(
StringIO(bedtools_result),
header=None,
usecols=usecols,
names=names,
# dtype={
# "Start": np.int32,
# "End": np.int32
# },
sep="\t")
def compare_results(bedtools_df, result):
# from pydbg import dbg
# dbg(bedtools_df.dtypes)
# dbg(result.df.dtypes)
if not bedtools_df.empty:
assert_df_equal(result.df, bedtools_df)
else:
assert bedtools_df.empty == result.df.empty
def compare_results_nearest(bedtools_df, result):
if not bedtools_df.empty:
bedtools_df = bedtools_df[bedtools_df.Distance != -1]
result = result.df
if not len(result) == 0:
bedtools_df = bedtools_df.sort_values("Start End Distance".split())
result = result.sort_values("Start End Distance".split())
result_df = result["Chromosome Start End Strand Distance".split()]
assert_df_equal(result_df, bedtools_df)
else:
assert bedtools_df.empty
@pytest.mark.bedtools
@pytest.mark.parametrize("strandedness", no_opposite)
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
def test_set_intersect(gr, gr2, strandedness):
set_intersect_command = "bedtools intersect {strand} -a <(sort -k1,1 -k2,2n {f1} | bedtools merge {strand} -c 4,5,6 -o first -i -) -b <(sort -k1,1 -k2,2n {f2} | bedtools merge {strand} -c 4,5,6 -o first -i -)"
bedtools_result = run_bedtools(set_intersect_command, gr, gr2,
strandedness)
bedtools_df = read_bedtools_result_set_op(bedtools_result, strandedness)
result = gr.set_intersect(gr2, strandedness=strandedness)
compare_results(bedtools_df, result)
@pytest.mark.bedtools
@pytest.mark.parametrize("strandedness", no_opposite)
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
# @reproduce_failure('4.15.0', b'AXicY2RAA4zoAgAAVQAD')
def test_set_union(gr, gr2, strandedness):
set_union_command = "cat {f1} {f2} | bedtools sort | bedtools merge {strand} -c 4,5,6 -o first -i -" # set_union_command = "bedtools merge {strand} -c 4,5,6 -o first -i {f1}"
bedtools_result = run_bedtools(set_union_command, gr, gr2, strandedness)
bedtools_df = read_bedtools_result_set_op(bedtools_result, strandedness)
result = gr.set_union(gr2, strandedness=strandedness)
compare_results(bedtools_df, result)
@pytest.mark.bedtools
@pytest.mark.parametrize("strandedness", strandedness)
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
# @reproduce_failure('4.32.2', b'AXicY2RAA4wQzIgiCAAAgAAF')
# @reproduce_failure('5.5.4', b'AXicY2RABYyMEAqKGRgAAHMABg==')
def test_overlap(gr, gr2, strandedness):
overlap_command = "bedtools intersect -u {strand} -a {f1} -b {f2}"
bedtools_result = run_bedtools(overlap_command, gr, gr2, strandedness)
bedtools_df = pd.read_csv(
StringIO(bedtools_result),
header=None,
names="Chromosome Start End Name Score Strand".split(),
sep="\t")
result = gr.overlap(gr2, strandedness=strandedness)
compare_results(bedtools_df, result)
@pytest.mark.bedtools
@pytest.mark.parametrize("strandedness", strandedness)
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
def test_intersect(gr, gr2, strandedness):
intersect_command = "bedtools intersect {strand} -a {f1} -b {f2}"
bedtools_result = run_bedtools(intersect_command, gr, gr2, strandedness)
bedtools_df = pd.read_csv(
StringIO(bedtools_result),
header=None,
names="Chromosome Start End Name Score Strand".split(),
sep="\t")
result = gr.intersect(gr2, strandedness=strandedness)
compare_results(bedtools_df, result)
@pytest.mark.bedtools
@pytest.mark.parametrize("strandedness", strandedness)
@settings(
max_examples=max_examples,
print_blob=True,
deadline=deadline,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
# @reproduce_failure('4.15.0', b'AXicY2RABoxghAoAAGkABA==')
# @reproduce_failure('4.15.0', b'AXicY2RABoxgxAAjQQAAAG8ABQ==')
# @reproduce_failure('4.15.0', b'AXicY2RABqwMDIwMaAAAALkACA==')
# @reproduce_failure('4.15.0', b'AXicY2RAA4xIJAgAAABcAAQ=')
# reproduce_failure('4.15.0', b'AXicY2RAAEYGhv9AkhHGgQIAFHQBBQ==')
# @reproduce_failure('4.15.0', b'AXicY2QAAUYGGGCEYIQAVAgAALUACA==')
def test_coverage(gr, gr2, strandedness):
print(gr.df)
print(gr2.df)
coverage_command = "bedtools coverage {strand} -a {f1} -b {f2}"
bedtools_result = run_bedtools(coverage_command, gr, gr2, strandedness)
bedtools_df = pd.read_csv(
StringIO(bedtools_result),
header=None,
usecols=[0, 1, 2, 3, 4, 5, 6, 9],
names=
"Chromosome Start End Name Score Strand NumberOverlaps FractionOverlaps"
.split(),
dtype={"FractionOverlap": np.float},
sep="\t")
result = gr.coverage(gr2, strandedness=strandedness)
print("pyranges")
print(result.df)
print("bedtools")
print(bedtools_df)
# assert len(result) > 0
assert np.all(
bedtools_df.NumberOverlaps.values == result.NumberOverlaps.values)
np.testing.assert_allclose(
bedtools_df.FractionOverlaps, result.FractionOverlaps, atol=1e-5)
# compare_results(bedtools_df, result)
# @pytest.mark.bedtools
# @pytest.mark.parametrize("strandedness", strandedness)
# @settings(
# max_examples=max_examples,
# deadline=deadline,
# suppress_health_check=HealthCheck.all())
# @given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
# @reproduce_failure('4.15.0', b'AXicY2RgYGAEIzgAsRkZUfkMDAAA2AAI')
# def test_no_intersect(gr, gr2, strandedness):
# intersect_command = "bedtools intersect -v {strand} -a {f1} -b {f2}"
# bedtools_result = run_bedtools(intersect_command, gr, gr2, strandedness)
# bedtools_df = pd.read_csv(
# StringIO(bedtools_result),
# header=None,
# names="Chromosome Start End Name Score Strand".split(),
# sep="\t")
# # bedtools bug: https://github.com/arq5x/bedtools2/issues/719
# result = gr.no_overlap(gr2, strandedness=strandedness)
# from pydbg import dbg
# dbg(result)
# dbg(bedtools_df)
# # result2 = gr.intersect(gr2, strandedness)
# compare_results(bedtools_df, result)
@pytest.mark.bedtools
@pytest.mark.parametrize("strandedness", ["same", "opposite", False]) #
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
# @reproduce_failure('4.5.7', b'AXicLYaJCQAACIS0/YfuuQRRAbVG94Dk5LHSBgJ3ABU=')
# @reproduce_failure('4.15.0', b'AXicY2QAAUYGGAVlIQAAAIIABQ==')
def test_subtraction(gr, gr2, strandedness):
subtract_command = "bedtools subtract {strand} -a {f1} -b {f2}"
bedtools_result = run_bedtools(subtract_command, gr, gr2, strandedness)
bedtools_df = pd.read_csv(
StringIO(bedtools_result),
header=None,
names="Chromosome Start End Name Score Strand".split(),
sep="\t")
print("subtracting" * 50)
result = gr.subtract(gr2, strandedness=strandedness)
print("bedtools_result")
print(bedtools_df)
print("PyRanges result:")
print(result)
compare_results(bedtools_df, result)
nearest_hows = [None, "upstream", "downstream"]
overlaps = [True, False]
@pytest.mark.bedtools
@pytest.mark.parametrize("nearest_how,overlap,strandedness",
product(nearest_hows, overlaps, strandedness))
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
# @reproduce_failure('4.32.2', b'AXicY2RkAAEQWf///38IByYGoYEAAFjhA4Q=')
def test_nearest(gr, gr2, nearest_how, overlap, strandedness):
nearest_command = "bedtools closest {bedtools_how} {strand} {overlap} -t first -d -a <(sort -k1,1 -k2,2n {f1}) -b <(sort -k1,1 -k2,2n {f2})"
bedtools_result = run_bedtools(nearest_command, gr, gr2, strandedness,
overlap, nearest_how)
bedtools_df = pd.read_csv(
StringIO(bedtools_result),
header=None,
names="Chromosome Start End Strand Chromosome2 Distance".split(),
usecols=[0, 1, 2, 5, 6, 12],
sep="\t")
bedtools_df.Distance = bedtools_df.Distance.abs()
bedtools_df = bedtools_df[bedtools_df.Chromosome2 != "."]
    bedtools_df = bedtools_df.drop("Chromosome2", axis=1)
result = gr.nearest(
gr2, strandedness=strandedness, overlap=overlap, how=nearest_how)
print("bedtools " * 5)
print(bedtools_df)
print("result " * 5)
print(result)
compare_results_nearest(bedtools_df, result)
@pytest.mark.bedtools
@pytest.mark.parametrize("strandedness", no_opposite)
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
def test_jaccard(gr, gr2, strandedness):
"""Bedtools segfaults"""
jaccard_command = "bedtools jaccard {strand} -a <(sort -k1,1 -k2,2n {f1}) -b <(sort -k1,1 -k2,2n {f2})"
# # https://github.com/arq5x/bedtools2/issues/645
# # will make tests proper when bedtools is fixed
result = gr.stats.jaccard(gr2, strandedness=strandedness)
assert 0 <= result <= 1
@pytest.mark.bedtools
@pytest.mark.parametrize("strandedness", strandedness)
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
def test_join(gr, gr2, strandedness):
join_command = "bedtools intersect {strand} -wo -a {f1} -b {f2}"
bedtools_result = run_bedtools(join_command, gr, gr2, strandedness)
bedtools_df = pd.read_csv(
StringIO(bedtools_result),
header=None,
sep="\t",
names=
"Chromosome Start End Name Score Strand Chromosome_b Start_b End_b Name_b Score_b Strand_b Overlap"
.split(),
dtype={
"Chromosome": "category",
"Strand": "category"
}).drop(
"Chromosome_b Overlap".split(), axis=1)
result = gr.join(gr2, strandedness=strandedness)
if result.df.empty:
assert bedtools_df.empty
else:
assert_df_equal(result.df, bedtools_df)
@pytest.mark.bedtools
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min2(), gr2=dfs_min2()) # pylint: disable=no-value-for-parameter
def test_reldist(gr, gr2):
reldist_command = "bedtools reldist -a <(sort -k1,1 -k2,2n {f1}) -b <(sort -k1,1 -k2,2n {f2})"
bedtools_result = run_bedtools(reldist_command, gr, gr2, False)
bedtools_result = pd.read_csv(StringIO(bedtools_result), sep="\t")
print("bedtools_result")
print(bedtools_result.reldist)
result = gr.stats.relative_distance(gr2)
print("result")
print(result.reldist)
# bug in bedtools, therefore not testing this properly
# https://github.com/arq5x/bedtools2/issues/711
assert 1
new_pos = ["union"] # ["intersection", "union"]
# @pytest.mark.parametrize("strandedness,new_pos", product(
# strandedness, new_pos))
# @settings(
# max_examples=max_examples,
# deadline=deadline,
# print_blob=True,
# suppress_health_check=HealthCheck.all())
# @given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
# def test_join_new_pos(gr, gr2, strandedness, new_pos):
# result = gr.join(gr2, strandedness=strandedness).new_position(new_pos)
# import numpy as np
# result2 = gr.join(gr2, strandedness=strandedness)
# if result.df.empty:
# assert result2.df.empty
# else:
# if new_pos == "union":
# new_starts = np.minimum(result2.Start, result2.Start_b)
# new_ends = np.maximum(result2.End, result2.End_b)
# else:
# new_starts = np.maximum(result2.Start, result2.Start_b)
# new_ends = np.minimum(result2.End, result2.End_b)
# assert list(result.Start.values) == list(new_starts)
# assert list(result.End.values) == list(new_ends)
# @pytest.mark.parametrize("strand", [True, False])
# @settings(
# max_examples=max_examples,
# deadline=deadline,
# suppress_health_check=HealthCheck.all())
# @given(gr=dfs_min_with_gene_id()) # pylint: disable=no-value-for-parameter
# def test_introns(gr, strand):
# result = gr.features.introns()
# print(result)
# df = gr.df
# grs = []
# for g, gdf in df.groupby("ID"):
# grs.append(pr.PyRanges(gdf))
# expected = pr.concat([gr.merge() for gr in grs]).df
# print(expected)
# print(result)
# assert_df_equal(result, expected)
k_nearest_ties = ["first", "last", None]
# k_nearest_ties = ["first", None]
k_nearest_ties = ["last"]
k_nearest_params = reversed(list(product(nearest_hows, [True, False], strandedness, k_nearest_ties)))
@pytest.mark.bedtools
@pytest.mark.explore
@pytest.mark.parametrize("nearest_how,overlap,strandedness,ties", k_nearest_params) #
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
# @reproduce_failure('4.43.5', b'AXicY2RAA4zoTAAAWwAE')
def test_k_nearest(gr, gr2, nearest_how, overlap, strandedness, ties):
print("-----" * 20)
# gr = gr.apply(lambda df: df.astype({"Start": np.int32, "End": np.int32}))
# gr2 = gr2.apply(lambda df: df.astype({"Start": np.int32, "End": np.int32}))
# print(gr)
# print(gr2)
nearest_command = "bedtools closest -k 2 {bedtools_how} {strand} {overlap} {ties} -a <(sort -k1,1 -k2,2n {f1}) -b <(sort -k1,1 -k2,2n {f2})"
bedtools_result = run_bedtools(nearest_command, gr, gr2, strandedness,
overlap, nearest_how, ties)
bedtools_df = pd.read_csv(
StringIO(bedtools_result),
header=None,
names="Chromosome Start End Strand Chromosome2 Distance".split(),
usecols=[0, 1, 2, 5, 6, 12],
sep="\t")
bedtools_df.Distance = bedtools_df.Distance.abs()
bedtools_df = bedtools_df[bedtools_df.Chromosome2 != "."]
    bedtools_df = bedtools_df.drop("Chromosome2", axis=1)
# cannot test with k > 1 because bedtools algo has different syntax
# cannot test keep_duplicates "all" or None/False properly, as the semantics is different for bedtools
result = gr.k_nearest(
gr2, k=2, strandedness=strandedness, overlap=overlap, how=nearest_how, ties=ties)
# result = result.apply(lambda df: df.astype({"Start": np.int64, "End": np.int64, "Distance": np.int64}))
if len(result):
result.Distance = result.Distance.abs()
print("bedtools " * 5)
print(bedtools_df)
print("result " * 5)
print(result)
compare_results_nearest(bedtools_df, result)
# @settings(
# max_examples=max_examples,
# deadline=deadline,
# print_blob=True,
# suppress_health_check=HealthCheck.all())
# @given(gr=dfs_min()) # pylint: disable=no-value-for-parameter
# def test_k_nearest_nearest_self_same_size(gr):
# result = gr.k_nearest(
# gr, k=1, strandedness=None, overlap=True, how=None, ties="first")
# assert len(result) == len(gr)
@settings(
max_examples=max_examples,
deadline=deadline,
print_blob=True,
suppress_health_check=HealthCheck.all())
@given(gr=dfs_min(), gr2=dfs_min()) # pylint: disable=no-value-for-parameter
def test_k_nearest_1_vs_nearest(gr, gr2):
result_k = gr.k_nearest(gr2, k=1, strandedness=None, overlap=True, how=None)
if len(result_k) > 0:
result_k.Distance = result_k.Distance.abs()
result_n = gr.nearest(gr2, strandedness=None, overlap=True, how=None)
if len(result_k) == 0 and len(result_n) == 0:
pass
else:
assert (result_k.sort().Distance.abs() == result_n.sort().Distance).all()
| {
"content_hash": "a1a494691c95837a2a137912d99b3ebb",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 213,
"avg_line_length": 32.125,
"alnum_prop": 0.6499106110001052,
"repo_name": "biocore-ntnu/pyranges",
"id": "d82f41226f90c72429cb2ff7cec8bc89ab23bdca",
"size": "19018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_binary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "556921"
},
{
"name": "R",
"bytes": "360"
},
{
"name": "Shell",
"bytes": "345"
}
],
"symlink_target": ""
} |
"""
theflasktest.config
    ~~~~~~~~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import imp
import os
import errno
from werkzeug.utils import import_string
from ._compat import string_types
class ConfigAttribute(object):
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
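# Illustrative use of the descriptor (mirrors how Flask itself wires attributes
# such as Flask.debug; the App class below is a made-up example):
# class App(object):
#     config = {'DEBUG': False}
#     debug = ConfigAttribute('DEBUG', get_converter=bool)
# App().debug  # -> False, read through the config dict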
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
    On Windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~theflasktest.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to `True` if you want silent failure for missing
files.
:return: bool. `True` if able to load config, `False` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to `True` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = imp.new_module('config')
d.__file__ = filename
try:
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes.
Just the uppercase variables in that object are stored in the config.
Example usage::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
:param obj: an import name or object
"""
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
| {
"content_hash": "d11cb95f219d8a9cc1b9889201618e1d",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 37.26946107784431,
"alnum_prop": 0.6222686375321337,
"repo_name": "rosudrag/Freemium-winner",
"id": "b9a30331fdb40cbe9b79a1e44d3b24aad1abbf62",
"size": "6248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VirtualEnvironment/Lib/site-packages/flask/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1079"
},
{
"name": "C",
"bytes": "6349"
},
{
"name": "CSS",
"bytes": "12216"
},
{
"name": "Groff",
"bytes": "17679"
},
{
"name": "HTML",
"bytes": "13547"
},
{
"name": "JavaScript",
"bytes": "35679"
},
{
"name": "PowerShell",
"bytes": "1506"
},
{
"name": "Python",
"bytes": "12351458"
},
{
"name": "Tcl",
"bytes": "24447"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
]
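# Note: sphinx.ext.pngmath (listed above) was deprecated in Sphinx 1.4 and later
# removed in favour of sphinx.ext.imgmath; keeping it assumes an older Sphinx.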
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysec'
copyright = u'2015, Martin Thoma'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysecdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pysec.tex', u'pysec Documentation',
u'Martin Thoma', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysec', u'pysec Documentation',
[u'Martin Thoma'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pysec', u'pysec Documentation',
u'Martin Thoma', 'pysec', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
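# Illustrative cross-reference enabled by the intersphinx mapping above; whether
# pysec's own reST sources use this exact role is an assumption:
#
#     See :py:func:`json.dumps` for details on serialisation.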
| {
"content_hash": "139748e53fb4f61c88ca596bdb165576",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 79,
"avg_line_length": 31.352941176470587,
"alnum_prop": 0.7036898061288305,
"repo_name": "MartinThoma/pysec",
"id": "dbae2b4add885f33a4f57b14b32828c375eadffc",
"size": "8413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "417"
},
{
"name": "Python",
"bytes": "12290"
}
],
"symlink_target": ""
} |
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import platform
from mock import patch
import numpy as np
from numpy.testing import assert_allclose
from nose.tools import raises
from menpo.transform import PiecewiseAffine, ThinPlateSplines
from menpo.feature import sparse_hog, igo, lbp, no_op
import menpo.io as mio
from menpo.landmark import ibug_face_68_trimesh
from menpofit.aam import AAMBuilder, PatchBasedAAMBuilder
# load images
filenames = ['breakingbad.jpg', 'takeo.ppm', 'lenna.png', 'einstein.jpg']
training = []
for i in range(4):
im = mio.import_builtin_asset(filenames[i])
im.crop_to_landmarks_proportion_inplace(0.1)
if im.n_channels == 3:
im = im.as_greyscale(mode='luminosity')
training.append(im)
# build aams
template_trilist_image = training[0].landmarks[None]
trilist = ibug_face_68_trimesh(template_trilist_image)[1].lms.trilist
aam1 = AAMBuilder(features=[igo, sparse_hog, no_op],
transform=PiecewiseAffine,
trilist=trilist,
normalization_diagonal=150,
n_levels=3,
downscale=2,
scaled_shape_models=False,
max_shape_components=[1, 2, 3],
max_appearance_components=[3, 3, 3],
boundary=3).build(training)
aam2 = AAMBuilder(features=[no_op, no_op],
transform=ThinPlateSplines,
trilist=None,
normalization_diagonal=None,
n_levels=2,
downscale=1.2,
scaled_shape_models=True,
max_shape_components=None,
max_appearance_components=1,
boundary=0).build(training)
aam3 = AAMBuilder(features=igo,
transform=ThinPlateSplines,
trilist=None,
normalization_diagonal=None,
n_levels=1,
downscale=3,
scaled_shape_models=True,
max_shape_components=[2],
max_appearance_components=10,
boundary=2).build(training)
aam4 = PatchBasedAAMBuilder(features=lbp,
patch_shape=(10, 13),
normalization_diagonal=200,
n_levels=2,
downscale=1.2,
scaled_shape_models=True,
max_shape_components=1,
max_appearance_components=None,
boundary=2).build(training)
@raises(ValueError)
def test_features_exception():
AAMBuilder(features=[igo, sparse_hog]).build(training)
@raises(ValueError)
def test_n_levels_exception():
AAMBuilder(n_levels=0).build(training)
@raises(ValueError)
def test_downscale_exception():
aam = AAMBuilder(downscale=1).build(training)
assert (aam.downscale == 1)
AAMBuilder(downscale=0).build(training)
@raises(ValueError)
def test_normalization_diagonal_exception():
aam = AAMBuilder(normalization_diagonal=100).build(training)
assert (aam.appearance_models[0].n_features == 382)
AAMBuilder(normalization_diagonal=10).build(training)
@raises(ValueError)
def test_max_shape_components_exception():
AAMBuilder(max_shape_components=[1, 0.2, 'a']).build(training)
@raises(ValueError)
def test_max_appearance_components_exception():
AAMBuilder(max_appearance_components=[1, 2]).build(training)
@raises(ValueError)
def test_boundary_exception():
AAMBuilder(boundary=-1).build(training)
@patch('sys.stdout', new_callable=StringIO)
def test_verbose_mock(mock_stdout):
AAMBuilder().build(training, verbose=True)
@patch('sys.stdout', new_callable=StringIO)
def test_str_mock(mock_stdout):
print(aam1)
print(aam2)
print(aam3)
print(aam4)
def test_aam_1():
assert(aam1.n_training_images == 4)
assert(aam1.n_levels == 3)
assert(aam1.downscale == 2)
#assert(aam1.features[0] == igo and aam1.features[2] == no_op)
assert_allclose(np.around(aam1.reference_shape.range()), (109., 103.))
assert(not aam1.scaled_shape_models)
assert(not aam1.pyramid_on_features)
assert_allclose([aam1.shape_models[j].n_components
for j in range(aam1.n_levels)], (1, 2, 3))
assert (np.all([aam1.appearance_models[j].n_components == 3
for j in range(aam1.n_levels)]))
assert_allclose([aam1.appearance_models[j].template_instance.n_channels
for j in range(aam1.n_levels)], (2, 36, 1))
assert_allclose([aam1.appearance_models[j].components.shape[1]
for j in range(aam1.n_levels)], (14892, 268056, 7446))
def test_aam_2():
assert (aam2.n_training_images == 4)
assert (aam2.n_levels == 2)
assert (aam2.downscale == 1.2)
#assert (aam2.features[0] == no_op and aam2.features[1] == no_op)
assert_allclose(np.around(aam2.reference_shape.range()), (169., 161.))
assert aam2.scaled_shape_models
assert (not aam2.pyramid_on_features)
assert (np.all([aam2.shape_models[j].n_components == 3
for j in range(aam2.n_levels)]))
assert (np.all([aam2.appearance_models[j].n_components == 1
for j in range(aam2.n_levels)]))
assert (np.all([aam2.appearance_models[j].template_instance.n_channels == 1
for j in range(aam2.n_levels)]))
assert_allclose([aam2.appearance_models[j].components.shape[1]
for j in range(aam2.n_levels)], (12827, 18518))
def test_aam_3():
assert (aam3.n_training_images == 4)
assert (aam3.n_levels == 1)
assert (aam3.downscale == 3)
#assert (aam3.features[0] == igo and len(aam3.features) == 1)
assert_allclose(np.around(aam3.reference_shape.range()), (169., 161.))
assert aam3.scaled_shape_models
assert aam3.pyramid_on_features
assert (np.all([aam3.shape_models[j].n_components == 2
for j in range(aam3.n_levels)]))
assert (np.all([aam3.appearance_models[j].n_components == 3
for j in range(aam3.n_levels)]))
assert (np.all([aam3.appearance_models[j].template_instance.n_channels == 2
for j in range(aam3.n_levels)]))
assert_allclose([aam3.appearance_models[j].components.shape[1]
for j in range(aam3.n_levels)], 37036)
def test_aam_4():
assert (aam4.n_training_images == 4)
assert (aam4.n_levels == 2)
assert (aam4.downscale == 1.2)
#assert (aam4.features[0] == lbp)
assert_allclose(np.around(aam4.reference_shape.range()), (145., 138.))
assert aam4.scaled_shape_models
assert aam4.pyramid_on_features
assert (np.all([aam4.shape_models[j].n_components == 1
for j in range(aam4.n_levels)]))
assert (np.all([aam4.appearance_models[j].n_components == 3
for j in range(aam4.n_levels)]))
assert (np.all([aam4.appearance_models[j].template_instance.n_channels == 4
for j in range(aam4.n_levels)]))
if platform.system() != 'Windows':
# https://github.com/menpo/menpo/issues/450
assert_allclose([aam4.appearance_models[j].components.shape[1]
for j in range(aam4.n_levels)], (23656, 25988))
| {
"content_hash": "505b44aabb31c8b1706005e0150b0c24",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 79,
"avg_line_length": 36.92462311557789,
"alnum_prop": 0.6133641807294502,
"repo_name": "mrgloom/menpofit",
"id": "6bcaf3a2f1f908309f08d8cba56881826fbe0287",
"size": "7348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menpofit/test/aam_builder_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "894290"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import math, logging, re, numpy as np
from collections import OrderedDict
from pbcore.io import FastaReader, FastaRecord
class ReferenceContig(object):
"""
A contig from a reference (i.e. FASTA) file.
"""
def __init__(self, id, name, md5sum, sequence, length):
self.id = id # CmpH5-local id
self.name = name # Fasta header
self.md5sum = md5sum
self.sequence = sequence
self.length = length
byName = OrderedDict() # Fasta header (string e.g. "chr1") -> FastaRecord
byId = OrderedDict() # CmpH5 local id (integer) -> FastaRecord
byMD5 = OrderedDict() # MD5 sum (e.g. "a13...") -> FastaRecord
def idToName(_id):
return byId[_id].name
def nameToId(name):
return byName[name].id
# Interpret a string key (either a contig name, or an id given as a string)
# and find the associated id. Only to be used when interpreting
# command-line input!
def anyKeyToId(stringKey):
assert isLoaded()
if stringKey in byName:
return byName[stringKey].id
elif stringKey.isdigit():
refId = int(stringKey)
return byId[refId].id
else:
        raise Exception("Unknown reference name: %s" % stringKey)
def isLoaded():
return bool(byMD5)
def loadFromFile(filename, cmpH5):
"""
Reads reference from FASTA file, loading
lookup tables that can be used any time later.
"""
# Load contigs
assert not isLoaded()
f = FastaReader(filename)
numFastaRecords = 0
fastaChecksums = set()
for fastaRecord in f:
numFastaRecords += 1
md5sum = fastaRecord.md5
fastaChecksums.add(md5sum)
normalizedContigSequence = fastaRecord.sequence.upper()
if md5sum in cmpH5.referenceInfoTable.MD5:
cmpH5RefEntry = cmpH5.referenceInfo(md5sum)
refId = cmpH5RefEntry.ID
refName = fastaRecord.name
contig = ReferenceContig(refId, refName, md5sum,
np.array(normalizedContigSequence, dtype="c"),
len(normalizedContigSequence))
byId[refId] = contig
byName[refName] = contig
byMD5[contig.md5sum] = contig
logging.info("Loaded %d of %d reference groups from %s " %
(len(byId), numFastaRecords, filename))
# If the cmpH5 has alignments to contigs that weren't contained in
# the fasta file, report an error.
cmpH5Checksums = set(cmpH5.referenceInfoTable.MD5)
if not cmpH5Checksums.issubset(fastaChecksums):
logging.error("CmpH5 aligned to a contig not represented in FASTA file")
return 1
assert isLoaded()
def stringToWindow(s):
assert isLoaded()
if s is None:
return None
m = re.match("(.*):(.*)-(.*)", s)
if m:
refId = anyKeyToId(m.group(1))
refStart = int(m.group(2))
refEnd = min(int(m.group(3)), byId[refId].length)
else:
refId = anyKeyToId(s)
refStart = 0
refEnd = byId[refId].length
return (refId, refStart, refEnd)
def windowToString(referenceWindow):
assert isLoaded()
refId, refStart, refEnd = referenceWindow
return "%s:%d-%d" % (idToName(refId),
refStart,
refEnd)
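# Illustrative round trip for the two helpers above (assumes a loaded
# reference containing a contig named "chr1" of length 1000; the name and
# length are hypothetical):
#
#     stringToWindow("chr1:10-50")    -> (refId, 10, 50)
#     stringToWindow("chr1")          -> (refId, 0, 1000)
#     windowToString((refId, 10, 50)) -> "chr1:10-50"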
def enumerateChunks(refId, referenceStride, referenceWindow=None, overlap=0):
"""
Enumerate all work chunks (restricted to the window, if provided).
"""
assert isLoaded()
assert (referenceWindow is None) or (refId == referenceWindow[0])
referenceEntry = byId[refId]
if referenceWindow:
_, start, end = referenceWindow
else:
start, end = (0, referenceEntry.length)
# The last chunk only needs to reach 'end-overlap', because it
# will be extended to 'end' -- this prevents the generation of
# multiple chunks covering the last few bases of the reference
# (fixes bug #21940)
for chunkBegin in xrange(start, end-overlap, referenceStride):
yield (refId,
max(chunkBegin - overlap, 0),
min(chunkBegin + referenceStride + overlap, referenceEntry.length))
def numChunks(refId, referenceStride, referenceWindow=None):
"""
How many chunks will there be for the given refId and window restriction?
"""
assert isLoaded()
assert (referenceWindow is None) or (refId == referenceWindow[0])
referenceEntry = byId[refId]
if referenceWindow:
_, start, end = referenceWindow
else:
start, end = (0, referenceEntry.length)
return int(math.ceil(float(end-start)/referenceStride))
def enumerateIds(referenceWindow=None):
"""
Enumerate all refIds (subject to the referenceWindow restriction, if provided).
"""
assert isLoaded()
if referenceWindow is None:
for refId in byId: yield refId
else:
refId, refStart, refEnd = referenceWindow
yield refId
| {
"content_hash": "fdb1dbc37ac6d97dd33ff537c907a3c8",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 83,
"avg_line_length": 34.445205479452056,
"alnum_prop": 0.6255716842314576,
"repo_name": "afif-elghraoui/CorrelatedVariants",
"id": "fdefad8eedd4792f788e6a5177c8e7423e9795d7",
"size": "6886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RareVariants/reference.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "116240"
}
],
"symlink_target": ""
} |
import datetime
from mock import patch
from django.contrib.auth.models import User
from draalcore.test_utils.rest_api import AuthAPI
from draalcore.test_utils.basetest import BaseTest
from draalcore.auth.models import UserAccountProfile
class UserRegistrationTestCase(BaseTest):
ERROR_TEXT = 'Following data items are missing: username, email, password, first_name, last_name'
def basetest_initialize(self):
self.auth_api = AuthAPI(self)
def test_user_registration_no_parameters(self):
"""No user registration parameters are received"""
# GIVEN no registration parameters
data = {}
# WHEN registering new user
response = self.auth_api.register(data)
# THEN it should fail
self.assertTrue(response.error)
# AND error message is available
self.assertTrue('errors' in response.data)
self.assertEqual(response.data['errors'][0], self.ERROR_TEXT)
def test_user_registration_invalid_parameters(self):
"""Invalid user registration parameters are received"""
err_text = 'Data field \'username\' must be of type string'
# GIVEN invalid registration parameters
data = {
'username': 2, # Number, should be string
'password': 'password',
'email': 'test@test.com',
'first_name': 'test',
'last_name': 'test'
}
# WHEN registering new user
response = self.auth_api.register(data)
# THEN it should fail
self.assertTrue(response.error)
# AND error message is available
self.assertTrue('errors' in response.data)
self.assertEqual(response.data['errors'][0], err_text)
def _test_duplicate_username(self, username):
"""Username is already reserved for another user"""
data = {
'username': username,
'password': 'password',
'email': 'test@test.com',
'first_name': 'test',
'last_name': 'user'
}
# WHEN registering new user
response = self.auth_api.register(data)
# THEN it should fail
self.assertTrue(response.error)
error_text = 'Username {} is already reserved, please select another name'.format(username)
self.assertEqual(response.data['errors'][0], error_text)
def _test_activate_user(self, activation_key):
"""User account is activated"""
# GIVEN activation key for user
data = {'activation_key': activation_key}
accounts = UserAccountProfile.objects.all()
self.assertFalse(accounts[0].user.is_active)
# WHEN activating user
response = self.auth_api.activate_user(data)
# THEN it should succeed
self.assertTrue(response.success)
# AND user is activated
accounts = UserAccountProfile.objects.all()
self.assertTrue(accounts[0].user.is_active)
def _test_activate_user2(self, activation_key):
"""Activated user account is activated again"""
# GIVEN activation key for already activated user
data = {'activation_key': activation_key}
accounts = UserAccountProfile.objects.all()
self.assertTrue(accounts[0].user.is_active)
# WHEN activating user
response = self.auth_api.activate_user(data)
# THEN it should fail
self.assertTrue(response.error)
# AND error message is available
self.assertEqual(response.data['errors'][0], 'Activation key not found')
def _test_activate_user_key_expired(self, activation_key):
"""Activation key has expired"""
# GIVEN user account is not activated within allowed time window
user = User.objects.all()[0]
expiration_date = datetime.timedelta(days=180)
user.date_joined = user.date_joined - expiration_date
user.save()
# WHEN activating user account
data = {'activation_key': activation_key}
response = self.auth_api.activate_user(data)
# THEN it should fail
self.assertTrue(response.error)
# AND error message is available
self.assertEqual(response.data['errors'][0], 'Activation key expired')
user.date_joined = user.date_joined + expiration_date
user.save()
def _test_activate_user_invalid_key(self):
"""User account is activated with invalid key"""
# GIVEN user account is not activated within allowed time window
data = {'activation_key': 'abcdefg'}
# WHEN activating user account
response = self.auth_api.activate_user(data)
# THEN it should fail
self.assertTrue(response.error)
# AND error message is available
self.assertEqual(response.data['errors'][0], 'Invalid activation key')
@patch('draalcore.mailer.send_mail')
def test_user_registration(self, mailer_mock):
"""User registration parameters are received"""
mailer_mock.return_value = True
# GIVEN registration parameters
data = {
'username': 'testuser',
'password': 'password',
'email': 'test@test.com',
'first_name': 'test',
'last_name': 'user'
}
# WHEN registering new user
response = self.auth_api.register(data)
# THEN it should succeed
self.assertTrue(response.success)
# AND user account is available
accounts = UserAccountProfile.objects.all()
activation_key = accounts[0].activation_key
self.assertEqual(accounts.count(), 1)
self.assertEqual(accounts[0].user.username, data['username'])
self.assertEqual(accounts[0].user.email, data['email'])
# AND email is sent to user
self.assertEqual(mailer_mock.call_count, 1)
self._test_duplicate_username(data['username'])
self._test_activate_user_invalid_key()
self._test_activate_user_key_expired(activation_key)
self._test_activate_user(activation_key)
self._test_activate_user2(activation_key)
| {
"content_hash": "331df9196d7b2bf2cbef3a8ed9fa9079",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 101,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.6321311475409837,
"repo_name": "jojanper/draalcore",
"id": "caa7e7a85635f9c35e9b9a5cd4ac799828e08729",
"size": "6147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "draalcore/auth/tests/test_user_registration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "749"
},
{
"name": "JavaScript",
"bytes": "3569"
},
{
"name": "Python",
"bytes": "327792"
}
],
"symlink_target": ""
} |
import functools
import inspect
import unittest
from pecan import expose
from pecan import util
class TestArgSpec(unittest.TestCase):
@property
def controller(self):
class RootController(object):
@expose()
def index(self, a, b, c=1, *args, **kwargs):
return 'Hello, World!'
@staticmethod
@expose()
def static_index(a, b, c=1, *args, **kwargs):
return 'Hello, World!'
return RootController()
def test_no_decorator(self):
expected = inspect.getargspec(self.controller.index.__func__)
actual = util.getargspec(self.controller.index.__func__)
assert expected == actual
expected = inspect.getargspec(self.controller.static_index)
actual = util.getargspec(self.controller.static_index)
assert expected == actual
def test_simple_decorator(self):
def dec(f):
return f
expected = inspect.getargspec(self.controller.index.__func__)
actual = util.getargspec(dec(self.controller.index.__func__))
assert expected == actual
expected = inspect.getargspec(self.controller.static_index)
actual = util.getargspec(dec(self.controller.static_index))
assert expected == actual
def test_simple_wrapper(self):
def dec(f):
@functools.wraps(f)
def wrapped(*a, **kw):
return f(*a, **kw)
return wrapped
expected = inspect.getargspec(self.controller.index.__func__)
actual = util.getargspec(dec(self.controller.index.__func__))
assert expected == actual
expected = inspect.getargspec(self.controller.static_index)
actual = util.getargspec(dec(self.controller.static_index))
assert expected == actual
def test_multiple_decorators(self):
def dec(f):
@functools.wraps(f)
def wrapped(*a, **kw):
return f(*a, **kw)
return wrapped
expected = inspect.getargspec(self.controller.index.__func__)
actual = util.getargspec(dec(dec(dec(self.controller.index.__func__))))
assert expected == actual
expected = inspect.getargspec(self.controller.static_index)
actual = util.getargspec(dec(dec(dec(
self.controller.static_index))))
assert expected == actual
def test_decorator_with_args(self):
def dec(flag):
def inner(f):
@functools.wraps(f)
def wrapped(*a, **kw):
return f(*a, **kw)
return wrapped
return inner
expected = inspect.getargspec(self.controller.index.__func__)
actual = util.getargspec(dec(True)(self.controller.index.__func__))
assert expected == actual
expected = inspect.getargspec(self.controller.static_index)
actual = util.getargspec(dec(True)(
self.controller.static_index))
assert expected == actual
def test_nested_cells(self):
def before(handler):
def deco(f):
def wrapped(*args, **kwargs):
if callable(handler):
handler()
return f(*args, **kwargs)
return wrapped
return deco
class RootController(object):
@expose()
@before(lambda: True)
def index(self, a, b, c):
return 'Hello, World!'
argspec = util._cfg(RootController.index)['argspec']
assert argspec.args == ['self', 'a', 'b', 'c']
def test_class_based_decorator(self):
class deco(object):
def __init__(self, arg):
self.arg = arg
def __call__(self, f):
@functools.wraps(f)
def wrapper(*args, **kw):
assert self.arg == '12345'
return f(*args, **kw)
return wrapper
class RootController(object):
@expose()
@deco('12345')
def index(self, a, b, c):
return 'Hello, World!'
argspec = util._cfg(RootController.index)['argspec']
assert argspec.args == ['self', 'a', 'b', 'c']
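# What these tests pin down, shown on a hypothetical function f(a, b, c=1)
# that is not part of the suite: once f is wrapped by a @functools.wraps-ed
# decorator, the wrapper's own signature is just (*a, **kw), so plain
# inspect.getargspec reports the wrapper; pecan.util.getargspec is expected to
# dig through the closure cells and still report
# ArgSpec(args=['a', 'b', 'c'], varargs=None, keywords=None, defaults=(1,)).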
| {
"content_hash": "1c7b832296125a02fae84e9917bdb987",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 31.144927536231883,
"alnum_prop": 0.5516519311307585,
"repo_name": "jdandrea/pecan",
"id": "1e59ad0a30d316aea83711e8559e8d9ced91b406",
"size": "4298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pecan/tests/test_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "HTML",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "456033"
}
],
"symlink_target": ""
} |
"""
The input generator
===================
"""
import argparse
from .simultask import gen_simul_task_from_YAML
from .gulpinter import COMPUTE_PARAM_FUNCS, GET_RES_FUNCS
def gen_main():
"""The main function for the input generator
"""
# Parse the command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar='INP', nargs=1,
type=argparse.FileType('r'),
help='The name of the JSON/YAML input file')
args = parser.parse_args()
# Generate the simulation task object from the input file.
simul_task = gen_simul_task_from_YAML(
args.input[0], COMPUTE_PARAM_FUNCS, GET_RES_FUNCS
)
# Generate the input files for the simulation tasks.
simul_task.gen_inp()
return 0
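# Entry-point sketch (an assumption -- the project may instead expose
# gen_main() through a setuptools console_scripts entry point); running the
# module as ``python -m GCMCbyGULP.geninp input.yaml`` keeps the relative
# imports above valid:
if __name__ == '__main__':
    import sys
    sys.exit(gen_main())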
| {
"content_hash": "116e62252cbebfa7faae252c0ba64e2e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 24.545454545454547,
"alnum_prop": 0.6259259259259259,
"repo_name": "tschijnmo/GCMCbyGULP",
"id": "bbab0d91c228b23225906195d3254bbb3a04ce3b",
"size": "810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GCMCbyGULP/geninp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "455"
},
{
"name": "Python",
"bytes": "36235"
}
],
"symlink_target": ""
} |
from ._VerticalMeasurements import *
from ._VelodynePacket import *
| {
"content_hash": "82b463d45be51a4fbb5c0e1d66a66042",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 36,
"avg_line_length": 34,
"alnum_prop": 0.7941176470588235,
"repo_name": "WuNL/mylaptop",
"id": "d3cd52a953271b311bf5d7423530b0942e8bfebd",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "install/lib/python2.7/dist-packages/velodyne0/msg/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "189211"
},
{
"name": "Common Lisp",
"bytes": "153266"
},
{
"name": "Python",
"bytes": "98976"
},
{
"name": "Shell",
"bytes": "14892"
}
],
"symlink_target": ""
} |
from types import CodeType
from typing import Any, Optional
import typing
if typing.TYPE_CHECKING:
from . import sketch # noqa: F401
from . import statements # noqa: F401
__all__ = ["PythonPrinter"]
class PythonPrinter:
"""
Print Python code with indentation gracefully.
"""
def __init__(
self,
path: str = "<string>",
end: str = "\n",
indent_mark: str = " " * 4,
) -> None:
self._path = path
self._indent_num = 0
self._committed_code = ""
self._end = end
self._indent_mark = indent_mark
self._finished = False
def writeline(
self, line: str, stmt: Optional["statements.AppendMixIn"] = None
) -> None:
"""
Write a line with indent.
"""
assert not self._finished, "Code Generation has already been finished."
if stmt:
line += f" # in file {self._path} at line {stmt.line_no}."
final_line = self._indent_mark * self._indent_num + line + self._end
self._committed_code += final_line
def indent_block(self) -> "PythonPrinter":
"""
Indent the code with `with` statement.
Example::
printer.writeline("def a():")
with printer.indent_block():
printer.writeline("return \"Text from function a.\"")
printer.writeline("a()")
"""
assert not self._finished, "Code Generation has already been finished."
return self
def _inc_indent_num(self) -> None:
assert not self._finished, "Code Generation has already been finished."
self._indent_num += 1
def _dec_indent_num(self) -> None:
assert not self._finished, "Code Generation has already been finished."
self.writeline("pass")
self._indent_num -= 1
def __enter__(self) -> None:
self._inc_indent_num()
def __exit__(self, *exc: Any) -> None:
self._dec_indent_num()
@property
def finished(self) -> bool: # pragma: no cover
return self._finished
@property
def plain_code(self) -> str:
"""
Return the plain, printed code.
"""
self._finished = True
return self._committed_code
@property
def compiled_code(self) -> CodeType:
"""
Return compiled code.
"""
if not hasattr(self, "_compiled_code"):
self._compiled_code = compile(
self.plain_code, self._path, "exec", dont_inherit=True
)
return self._compiled_code # type: ignore
@classmethod
def print_sketch(cls, skt: "sketch.Sketch") -> CodeType:
py_printer = cls(path=skt._path)
skt._root.print_code(py_printer)
return py_printer.compiled_code
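# Minimal self-check (illustrative; not part of the library's public surface).
# It builds a tiny function with indent_block() and executes the compiled code:
if __name__ == "__main__":  # pragma: no cover
    _demo = PythonPrinter(path="<demo>")
    _demo.writeline("def greet():")
    with _demo.indent_block():
        _demo.writeline("return 'hi'")
    exec(_demo.compiled_code)  # defines greet() in this module's namespace
    print(greet())  # noqa: F821 -- bound by the exec above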
| {
"content_hash": "0ece787312cb542a662af24209069d9e",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 26.752380952380953,
"alnum_prop": 0.5567817728729085,
"repo_name": "futursolo/sketchbook",
"id": "cc16b4adfd46d0def47d48aef3b9c9b7a6ce39cb",
"size": "3458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sketchbook/printer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "529"
},
{
"name": "Python",
"bytes": "82985"
}
],
"symlink_target": ""
} |
"""This module exports the Lintr plugin class."""
from SublimeLinter.lint import Linter, util
class Lintr(Linter):
"""Provides an interface to lintr R package."""
defaults = {
'linters': 'default_linters',
'cache': 'TRUE',
'selector': 'source.r'
}
regex = (
r'^.+?:(?P<line>\d+):(?P<col>\d+): '
r'(?:(?P<error>error)|(?P<warning>warning|style)): '
r'(?P<message>.+)'
)
multiline = False
line_col_base = (1, 1)
error_stream = util.STREAM_BOTH
word_re = None
tempfile_suffix = 'lintr'
def cmd(self):
"""Return a list with the command line to execute."""
settings = self.settings
tmp = settings.context['TMPDIR']
linters = self.defaults['linters']
command = "library(lintr);lint(cache = '{0}', commandArgs(TRUE), {1})".format(tmp,
linters)
return ['r',
'--slave',
'--restore',
'--no-save',
'-e',
command,
'--args',
'${temp_file}']
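# Illustrative command produced by cmd() (the cache directory and temp file
# path are placeholders filled in by SublimeLinter at run time):
#
#     r --slave --restore --no-save -e
#       "library(lintr);lint(cache = '/tmp', commandArgs(TRUE), default_linters)"
#       --args /path/to/file.lintr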
| {
"content_hash": "0e9430e41277bd842c129344e853038b",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 94,
"avg_line_length": 29.24390243902439,
"alnum_prop": 0.4645537948290242,
"repo_name": "jimhester/SublimeLinter-contrib-lintr",
"id": "441855ddb7dcfdcc2b88ca8565488f6fcfe40062",
"size": "1369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1663"
}
],
"symlink_target": ""
} |
import gzip
from deep_qa.models.memory_networks.differentiable_search import DifferentiableSearchMemoryNetwork
from deep_qa.data.instances.true_false_instance import TrueFalseInstance
from ...common.test_case import DeepQaTestCase
class FakeEncoder:
def predict(self, instances):
predictions = []
for _ in instances:
predictions.append([1.0, -1.0, 0.5, 0.25])
return predictions
class TestDifferentiableSearchMemoryNetwork(DeepQaTestCase):
# pylint: disable=protected-access
def setUp(self):
super(TestDifferentiableSearchMemoryNetwork, self).setUp()
self.corpus_path = self.TEST_DIR + 'FAKE_corpus.gz'
with gzip.open(self.corpus_path, 'wb') as corpus_file:
corpus_file.write('this is a sentence\n'.encode('utf-8'))
corpus_file.write('this is another sentence\n'.encode('utf-8'))
corpus_file.write('a really great sentence\n'.encode('utf-8'))
corpus_file.write('scientists study animals\n'.encode('utf-8'))
def test_initialize_lsh_does_not_crash(self):
args = {
'corpus_path': self.corpus_path,
'model_serialization_prefix': './',
'num_sentence_words': 3,
}
model = self.get_model(DifferentiableSearchMemoryNetwork, args)
model.encoder_model = FakeEncoder()
model._initialize_lsh()
def test_get_nearest_neighbors_does_not_crash(self):
args = {
'corpus_path': self.corpus_path,
'model_serialization_prefix': './',
'num_sentence_words': 5,
}
model = self.get_model(DifferentiableSearchMemoryNetwork, args)
model.encoder_model = FakeEncoder()
model._initialize_lsh()
model.num_sentence_words = 5
model.max_knowledge_length = 2
model.get_nearest_neighbors(TrueFalseInstance("this is a sentence", True))
| {
"content_hash": "57f198534af647027415f147b6547814",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 98,
"avg_line_length": 42.34782608695652,
"alnum_prop": 0.6339835728952772,
"repo_name": "RTHMaK/RPGOne",
"id": "90a30a775a7b272662fbc9555a75d1cf90880009",
"size": "1978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_qa-master/tests/models/memory_networks/differentiable_search_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Batchfile",
"bytes": "15029"
},
{
"name": "CSS",
"bytes": "41709"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Go",
"bytes": "287"
},
{
"name": "HTML",
"bytes": "633076"
},
{
"name": "JavaScript",
"bytes": "1128791"
},
{
"name": "Jupyter Notebook",
"bytes": "927247"
},
{
"name": "Makefile",
"bytes": "31756"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Matlab",
"bytes": "9454"
},
{
"name": "PHP",
"bytes": "708541"
},
{
"name": "PowerShell",
"bytes": "68503"
},
{
"name": "Python",
"bytes": "2278740"
},
{
"name": "Ruby",
"bytes": "1136"
},
{
"name": "Shell",
"bytes": "62555"
},
{
"name": "Smarty",
"bytes": "5752"
},
{
"name": "TeX",
"bytes": "34544"
}
],
"symlink_target": ""
} |
import copy
import json
import os
import re
import frappe
from frappe import _, get_module_path
from frappe.core.doctype.access_log.access_log import make_access_log
from frappe.core.doctype.document_share_key.document_share_key import is_expired
from frappe.utils import cint, sanitize_html, strip_html
from frappe.utils.jinja_globals import is_rtl
no_cache = 1
standard_format = "templates/print_formats/standard.html"
def get_context(context):
"""Build context for print"""
if not ((frappe.form_dict.doctype and frappe.form_dict.name) or frappe.form_dict.doc):
return {
"body": sanitize_html(
"""<h1>Error</h1>
<p>Parameters doctype and name required</p>
<pre>%s</pre>"""
% repr(frappe.form_dict)
)
}
if frappe.form_dict.doc:
doc = frappe.form_dict.doc
else:
doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
settings = frappe.parse_json(frappe.form_dict.settings)
letterhead = frappe.form_dict.letterhead or None
meta = frappe.get_meta(doc.doctype)
print_format = get_print_format_doc(None, meta=meta)
make_access_log(
doctype=frappe.form_dict.doctype, document=frappe.form_dict.name, file_type="PDF", method="Print"
)
print_style = None
body = get_rendered_template(
doc,
print_format=print_format,
meta=meta,
trigger_print=frappe.form_dict.trigger_print,
no_letterhead=frappe.form_dict.no_letterhead,
letterhead=letterhead,
settings=settings,
)
print_style = get_print_style(frappe.form_dict.style, print_format)
return {
"body": body,
"print_style": print_style,
"comment": frappe.session.user,
"title": frappe.utils.strip_html(doc.get_title() or doc.name),
"lang": frappe.local.lang,
"layout_direction": "rtl" if is_rtl() else "ltr",
"doctype": frappe.form_dict.doctype,
"name": frappe.form_dict.name,
"key": frappe.form_dict.get("key"),
}
def get_print_format_doc(print_format_name, meta):
"""Returns print format document"""
if not print_format_name:
print_format_name = frappe.form_dict.format or meta.default_print_format or "Standard"
if print_format_name == "Standard":
return None
else:
try:
return frappe.get_doc("Print Format", print_format_name)
except frappe.DoesNotExistError:
# if old name, return standard!
return None
def get_rendered_template(
doc,
name=None,
print_format=None,
meta=None,
no_letterhead=None,
letterhead=None,
trigger_print=False,
settings=None,
):
print_settings = frappe.get_single("Print Settings").as_dict()
print_settings.update(settings or {})
if isinstance(no_letterhead, str):
no_letterhead = cint(no_letterhead)
elif no_letterhead is None:
no_letterhead = not cint(print_settings.with_letterhead)
doc.flags.in_print = True
doc.flags.print_settings = print_settings
if not frappe.flags.ignore_print_permissions:
validate_print_permission(doc)
if doc.meta.is_submittable:
if doc.docstatus == 0 and not cint(print_settings.allow_print_for_draft):
frappe.throw(_("Not allowed to print draft documents"), frappe.PermissionError)
if doc.docstatus == 2 and not cint(print_settings.allow_print_for_cancelled):
frappe.throw(_("Not allowed to print cancelled documents"), frappe.PermissionError)
doc.run_method("before_print", print_settings)
if not hasattr(doc, "print_heading"):
doc.print_heading = None
if not hasattr(doc, "sub_heading"):
doc.sub_heading = None
if not meta:
meta = frappe.get_meta(doc.doctype)
jenv = frappe.get_jenv()
format_data, format_data_map = [], {}
# determine template
if print_format:
doc.print_section_headings = print_format.show_section_headings
doc.print_line_breaks = print_format.line_breaks
doc.align_labels_right = print_format.align_labels_right
doc.absolute_value = print_format.absolute_value
def get_template_from_string():
return jenv.from_string(get_print_format(doc.doctype, print_format))
if print_format.custom_format:
template = get_template_from_string()
elif print_format.format_data:
# set format data
format_data = json.loads(print_format.format_data)
for df in format_data:
format_data_map[df.get("fieldname")] = df
if "visible_columns" in df:
for _df in df.get("visible_columns"):
format_data_map[_df.get("fieldname")] = _df
doc.format_data_map = format_data_map
template = "standard"
elif print_format.standard == "Yes":
template = get_template_from_string()
else:
# fallback
template = "standard"
else:
template = "standard"
if template == "standard":
template = jenv.get_template(standard_format)
letter_head = frappe._dict(get_letter_head(doc, no_letterhead, letterhead) or {})
if letter_head.content:
letter_head.content = frappe.utils.jinja.render_template(
letter_head.content, {"doc": doc.as_dict()}
)
if letter_head.footer:
letter_head.footer = frappe.utils.jinja.render_template(
letter_head.footer, {"doc": doc.as_dict()}
)
convert_markdown(doc, meta)
args = {}
# extract `print_heading_template` from the first field and remove it
if format_data and format_data[0].get("fieldname") == "print_heading_template":
args["print_heading_template"] = format_data.pop(0).get("options")
args.update(
{
"doc": doc,
"meta": frappe.get_meta(doc.doctype),
"layout": make_layout(doc, meta, format_data),
"no_letterhead": no_letterhead,
"trigger_print": cint(trigger_print),
"letter_head": letter_head.content,
"footer": letter_head.footer,
"print_settings": print_settings,
}
)
html = template.render(args, filters={"len": len})
if cint(trigger_print):
html += trigger_print_script
return html
def set_link_titles(doc):
	# Map "<doctype>::<docname>" keys to their link titles under doc.__link_titles
if not doc.get("__link_titles"):
setattr(doc, "__link_titles", {})
meta = frappe.get_meta(doc.doctype)
set_title_values_for_link_and_dynamic_link_fields(meta, doc)
set_title_values_for_table_and_multiselect_fields(meta, doc)
def set_title_values_for_link_and_dynamic_link_fields(meta, doc, parent_doc=None):
if parent_doc and not parent_doc.get("__link_titles"):
setattr(parent_doc, "__link_titles", {})
elif doc and not doc.get("__link_titles"):
setattr(doc, "__link_titles", {})
for field in meta.get_link_fields() + meta.get_dynamic_link_fields():
if not doc.get(field.fieldname):
continue
# If link field, then get doctype from options
# If dynamic link field, then get doctype from dependent field
doctype = field.options if field.fieldtype == "Link" else doc.get(field.options)
meta = frappe.get_meta(doctype)
if not meta or not (meta.title_field and meta.show_title_field_in_link):
continue
link_title = frappe.get_cached_value(doctype, doc.get(field.fieldname), meta.title_field)
if parent_doc:
parent_doc.__link_titles[f"{doctype}::{doc.get(field.fieldname)}"] = link_title
elif doc:
doc.__link_titles[f"{doctype}::{doc.get(field.fieldname)}"] = link_title
def set_title_values_for_table_and_multiselect_fields(meta, doc):
for field in meta.get_table_fields():
if not doc.get(field.fieldname):
continue
_meta = frappe.get_meta(field.options)
for value in doc.get(field.fieldname):
set_title_values_for_link_and_dynamic_link_fields(_meta, value, doc)
def convert_markdown(doc, meta):
"""Convert text field values to markdown if necessary"""
for field in meta.fields:
if field.fieldtype == "Text Editor":
value = doc.get(field.fieldname)
if value and "<!-- markdown -->" in value:
doc.set(field.fieldname, frappe.utils.md_to_html(value))
@frappe.whitelist()
def get_html_and_style(
doc,
name=None,
print_format=None,
meta=None,
no_letterhead=None,
letterhead=None,
trigger_print=False,
style=None,
settings=None,
templates=None,
):
"""Returns `html` and `style` of print format, used in PDF etc"""
if isinstance(doc, str) and isinstance(name, str):
doc = frappe.get_doc(doc, name)
if isinstance(doc, str):
doc = frappe.get_doc(json.loads(doc))
print_format = get_print_format_doc(print_format, meta=meta or frappe.get_meta(doc.doctype))
set_link_titles(doc)
try:
html = get_rendered_template(
doc,
name=name,
print_format=print_format,
meta=meta,
no_letterhead=no_letterhead,
letterhead=letterhead,
trigger_print=trigger_print,
settings=frappe.parse_json(settings),
)
except frappe.TemplateNotFoundError:
frappe.clear_last_message()
html = None
return {"html": html, "style": get_print_style(style=style, print_format=print_format)}
@frappe.whitelist()
def get_rendered_raw_commands(doc, name=None, print_format=None, meta=None, lang=None):
"""Returns Rendered Raw Commands of print format, used to send directly to printer"""
if isinstance(doc, str) and isinstance(name, str):
doc = frappe.get_doc(doc, name)
if isinstance(doc, str):
doc = frappe.get_doc(json.loads(doc))
print_format = get_print_format_doc(print_format, meta=meta or frappe.get_meta(doc.doctype))
if not print_format or (print_format and not print_format.raw_printing):
frappe.throw(
_("{0} is not a raw printing format.").format(print_format), frappe.TemplateNotFoundError
)
return {
"raw_commands": get_rendered_template(doc, name=name, print_format=print_format, meta=meta)
}
def validate_print_permission(doc):
for ptype in ("read", "print"):
if frappe.has_permission(doc.doctype, ptype, doc) or frappe.has_website_permission(doc):
return
key = frappe.form_dict.key
if key and isinstance(key, str):
validate_key(key, doc)
else:
raise frappe.PermissionError(_("You do not have permission to view this document"))
def validate_key(key, doc):
document_key_expiry = frappe.get_cached_value(
"Document Share Key",
{"reference_doctype": doc.doctype, "reference_docname": doc.name, "key": key},
["expires_on"],
)
if document_key_expiry is not None:
if is_expired(document_key_expiry[0]):
raise frappe.exceptions.LinkExpired
else:
return
# TODO: Deprecate this! kept it for backward compatibility
if frappe.get_system_settings("allow_older_web_view_links") and key == doc.get_signature():
return
raise frappe.exceptions.InvalidKeyError
def get_letter_head(doc, no_letterhead, letterhead=None):
if no_letterhead:
return {}
if letterhead:
return frappe.db.get_value("Letter Head", letterhead, ["content", "footer"], as_dict=True)
if doc.get("letter_head"):
return frappe.db.get_value("Letter Head", doc.letter_head, ["content", "footer"], as_dict=True)
else:
return (
frappe.db.get_value("Letter Head", {"is_default": 1}, ["content", "footer"], as_dict=True) or {}
)
def get_print_format(doctype, print_format):
if print_format.disabled:
frappe.throw(
_("Print Format {0} is disabled").format(print_format.name), frappe.DoesNotExistError
)
# server, find template
module = print_format.module or frappe.db.get_value("DocType", doctype, "module")
path = os.path.join(
get_module_path(module, "Print Format", print_format.name),
frappe.scrub(print_format.name) + ".html",
)
if os.path.exists(path):
with open(path) as pffile:
return pffile.read()
else:
if print_format.raw_printing:
return print_format.raw_commands
if print_format.html:
return print_format.html
frappe.throw(_("No template found at path: {0}").format(path), frappe.TemplateNotFoundError)
def make_layout(doc, meta, format_data=None):
"""Builds a hierarchical layout object from the fields list to be rendered
by `standard.html`
:param doc: Document to be rendered.
:param meta: Document meta object (doctype).
:param format_data: Fields sequence and properties defined by Print Format Builder."""
layout, page = [], []
layout.append(page)
def get_new_section():
return {"columns": [], "has_data": False}
def append_empty_field_dict_to_page_column(page):
"""append empty columns dict to page layout"""
if not page[-1]["columns"]:
page[-1]["columns"].append({"fields": []})
for df in format_data or meta.fields:
if format_data:
# embellish df with original properties
df = frappe._dict(df)
if df.fieldname:
original = meta.get_field(df.fieldname)
if original:
newdf = original.as_dict()
newdf.hide_in_print_layout = original.get("hide_in_print_layout")
newdf.update(df)
df = newdf
df.print_hide = 0
if df.fieldtype == "Section Break" or page == []:
if len(page) > 1:
if page[-1]["has_data"] == False:
# truncate last section if empty
del page[-1]
section = get_new_section()
if df.fieldtype == "Section Break" and df.label:
section["label"] = df.label
page.append(section)
elif df.fieldtype == "Column Break":
# if last column break and last column is not empty
page[-1]["columns"].append({"fields": []})
else:
# add a column if not yet added
append_empty_field_dict_to_page_column(page)
if df.fieldtype == "HTML" and df.options:
doc.set(df.fieldname, True) # show this field
if df.fieldtype == "Signature" and not doc.get(df.fieldname):
placeholder_image = "/assets/frappe/images/signature-placeholder.png"
doc.set(df.fieldname, placeholder_image)
if is_visible(df, doc) and has_value(df, doc):
append_empty_field_dict_to_page_column(page)
page[-1]["columns"][-1]["fields"].append(df)
# section has fields
page[-1]["has_data"] = True
# if table, add the row info in the field
# if a page break is found, create a new docfield
if df.fieldtype == "Table":
df.rows = []
df.start = 0
df.end = None
for i, row in enumerate(doc.get(df.fieldname)):
if row.get("page_break"):
# close the earlier row
df.end = i
# new page, with empty section and column
page = [get_new_section()]
layout.append(page)
append_empty_field_dict_to_page_column(page)
# continue the table in a new page
df = copy.copy(df)
df.start = i
df.end = None
page[-1]["columns"][-1]["fields"].append(df)
return layout
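# Illustrative shape of the structure make_layout() returns (the field names
# are hypothetical); standard.html walks pages -> sections -> columns -> fields:
#
#     [                                      # layout: list of pages
#         [                                  # one page: list of sections
#             {"label": "Details",           # optional section label
#              "has_data": True,
#              "columns": [
#                  {"fields": [df1, df2]},   # docfields in column 1
#                  {"fields": [df3]},        # docfields in column 2
#              ]},
#         ],
#     ]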
def is_visible(df, doc):
"""Returns True if docfield is visible in print layout and does not have print_hide set."""
if df.fieldtype in ("Section Break", "Column Break", "Button"):
return False
if (df.permlevel or 0) > 0 and not doc.has_permlevel_access_to(df.fieldname, df):
return False
return not doc.is_print_hide(df.fieldname, df)
def has_value(df, doc):
value = doc.get(df.fieldname)
if value in (None, ""):
return False
elif isinstance(value, str) and not strip_html(value).strip():
if df.fieldtype in ["Text", "Text Editor"]:
return True
return False
elif isinstance(value, list) and not len(value):
return False
return True
def get_print_style(style=None, print_format=None, for_legacy=False):
print_settings = frappe.get_doc("Print Settings")
if not style:
style = print_settings.print_style or ""
context = {
"print_settings": print_settings,
"print_style": style,
"font": get_font(print_settings, print_format, for_legacy),
}
css = frappe.get_template("templates/styles/standard.css").render(context)
if style and frappe.db.exists("Print Style", style):
css = css + "\n" + frappe.db.get_value("Print Style", style, "css")
# move @import to top
for at_import in list(set(re.findall(r"(@import url\([^\)]+\)[;]?)", css))):
css = css.replace(at_import, "")
# prepend css with at_import
css = at_import + css
if print_format and print_format.css:
css += "\n\n" + print_format.css
return css
def get_font(print_settings, print_format=None, for_legacy=False):
default = 'Inter, "Helvetica Neue", Helvetica, Arial, "Open Sans", sans-serif'
if for_legacy:
return default
font = None
if print_format:
if print_format.font and print_format.font != "Default":
font = f"{print_format.font}, sans-serif"
if not font:
if print_settings.font and print_settings.font != "Default":
font = f"{print_settings.font}, sans-serif"
else:
font = default
return font
def get_visible_columns(data, table_meta, df):
"""Returns list of visible columns based on print_hide and if all columns have value."""
columns = []
doc = data[0] or frappe.new_doc(df.options)
hide_in_print_layout = df.get("hide_in_print_layout") or []
def add_column(col_df):
if col_df.fieldname in hide_in_print_layout:
return False
return is_visible(col_df, doc) and column_has_value(data, col_df.get("fieldname"), col_df)
if df.get("visible_columns"):
# columns specified by column builder
for col_df in df.get("visible_columns"):
# load default docfield properties
docfield = table_meta.get_field(col_df.get("fieldname"))
if not docfield:
continue
newdf = docfield.as_dict().copy()
newdf.update(col_df)
if add_column(newdf):
columns.append(newdf)
else:
for col_df in table_meta.fields:
if add_column(col_df):
columns.append(col_df)
return columns
def column_has_value(data, fieldname, col_df):
"""Check if at least one cell in column has non-zero and non-blank value"""
has_value = False
if col_df.fieldtype in ["Float", "Currency"] and not col_df.print_hide_if_no_value:
return True
for row in data:
value = row.get(fieldname)
if value:
if isinstance(value, str):
if strip_html(value).strip():
has_value = True
break
else:
has_value = True
break
return has_value
trigger_print_script = """
<script>
// allow long <tr> rows to break across printed pages
var elements = document.getElementsByTagName("tr");
var i = elements.length;
while (i--) {
if(elements[i].clientHeight>300){
elements[i].setAttribute("style", "page-break-inside: auto;");
}
}
window.print();
// close the window after print
// NOTE: doesn't close if print is cancelled in Chrome
// Changed timeout to 5s from 1s because it blocked mobile view rendering
setTimeout(function() {
window.close();
}, 5000);
</script>
"""
| {
"content_hash": "459ed55bea466a498d2f388e2532b091",
"timestamp": "",
"source": "github",
"line_count": 641,
"max_line_length": 99,
"avg_line_length": 27.764430577223088,
"alnum_prop": 0.695117154576614,
"repo_name": "frappe/frappe",
"id": "faf6a02067a6ae1c93cc6624adc0655d1af64192",
"size": "17895",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/www/printview.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250850"
},
{
"name": "JavaScript",
"bytes": "2523337"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3618097"
},
{
"name": "SCSS",
"bytes": "261690"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
} |
from gem5stats import log
from gem5stats import logquery
from gem5stats.util import BufferedISlice
import sys
import os
import argparse
def main():
parser = argparse.ArgumentParser(description='Plot a time series from a gem5 log.')
parser.add_argument('log', metavar='LOG', type=argparse.FileType('r'),
help='Log file')
parser.add_argument('fun', metavar='FUN', type=str, nargs='+',
help='Function to plot')
parser.add_argument('--fs', metavar='C', type=str,
default=":",
help='Field separator')
parser.add_argument("--last", action="store_true", default=False,
help="Only print the last entry")
parser.add_argument("--start", metavar="NUM", type=int, default=0,
help="Skip the first NUM entries")
parser.add_argument("--stop", metavar="NUM", type=int, default=None,
help="Stop after NUM entries")
parser.add_argument("--step", metavar="N", type=int, default=1,
help="Use every N windows")
args = parser.parse_args()
funs = []
for fun in args.fun:
funs.append(logquery.eval_fun(fun))
for no, fun in enumerate(funs):
print "# %i: %s" % (no, fun)
out = []
stream = BufferedISlice(log.stream_log(args.log),
start=args.start, stop=args.stop,
step=args.step)
for step in stream:
if isinstance(step, tuple):
step = step[0]
out = [ f(step) for f in funs ]
if not args.last:
print args.fs.join([ str(s) for s in out ])
if args.last:
print args.fs.join([ str(s) for s in out ])
if __name__ == "__main__":
main()
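# Example invocation (illustrative; the log path and the stat expression are
# hypothetical and depend on the gem5 run being inspected):
#
#     python query.py m5out/stats.log "sim_seconds" --fs "," --start 1 --step 10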
| {
"content_hash": "ef60cb90855a6ae63e693812dc5d0800",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 87,
"avg_line_length": 32.78181818181818,
"alnum_prop": 0.5490848585690515,
"repo_name": "andysan/gem5utils",
"id": "bd35fca7a49e984d2196cf5e06721c602fa7bb31",
"size": "3380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "query.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32078"
}
],
"symlink_target": ""
} |
u"""
:Copyright:
Copyright 2015 - 2022
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===================
pyliblinear Tests
===================
pyliblinear Tests.
"""
__author__ = u"Andr\xe9 Malo"
| {
"content_hash": "ce007b0da541ad01036d9c6cbf5835df",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 73,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.7188755020080321,
"repo_name": "ndparker/pyliblinear",
"id": "730ebd74dbbfc8436e5ff7379147787f2bdaba7f",
"size": "771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "194468"
},
{
"name": "C++",
"bytes": "86139"
},
{
"name": "Python",
"bytes": "66167"
},
{
"name": "Shell",
"bytes": "2887"
}
],
"symlink_target": ""
} |
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='hLH:l~+G_<9*Kjv??7d@g`,J~Ge&~SyfW(%Qi1DyfD|{KrKUKb')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INSTALLED_APPS += ['debug_toolbar', ]
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# Trick to enable the debug toolbar when developing with Docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + '1']
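    # This assumes the default Docker bridge network, where the host is
    # reachable at the subnet's .1 address (e.g. 172.17.0.1); adjust for
    # custom networks.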
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| {
"content_hash": "50d98eb1d6e8089de9426bfa58b21c12",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 99,
"avg_line_length": 30.36986301369863,
"alnum_prop": 0.48308525033829497,
"repo_name": "tkovalsky/folio",
"id": "1555fb30c142f34888806aff9017500f026fa032",
"size": "2241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17991"
},
{
"name": "HTML",
"bytes": "17824"
},
{
"name": "JavaScript",
"bytes": "2675"
},
{
"name": "Python",
"bytes": "43989"
},
{
"name": "Shell",
"bytes": "4232"
}
],
"symlink_target": ""
} |
'''
Contains PidIo() class
Typical contents of /proc/<pid>/io file::
rchar: 143848377
wchar: 4254218
syscr: 98216
syscw: 31339
read_bytes: 270336
write_bytes: 3489792
cancelled_write_bytes: 974848
'''
from logging import getLogger
from os import path as ospath
from re import compile as recompile
from .readfile import ReadFile
LOGGER = getLogger(__name__)
class PidIo(ReadFile):
'''
PidIo handling
'''
FILENAME = ospath.join('proc', '%s', 'io')
KEY = 'pidio'
REGEX = recompile('^[a-zA-Z_]+:')
def normalize(self):
'''
Translates data into dictionary
        The /proc/<pid>/io file is a series of records keyed on the ':' separator
'''
LOGGER.debug("Normalize")
lines = self.lines
ret = {}
for line in lines:
if self.REGEX.match(line):
key, vals = line.split(':')
val = int(vals.strip())
ret[key] = val
return ret
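# Hypothetical usage (ReadFile is defined elsewhere, so the exact constructor
# arguments are assumed here):
#   io = PidIo(pid=1234)
#   io.normalize()  # -> {'rchar': 143848377, 'wchar': 4254218, ...}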
| {
"content_hash": "d579275b2adb2fc5f6e247bae58b60b1",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 21.085106382978722,
"alnum_prop": 0.5751765893037336,
"repo_name": "eccles/lnxproc",
"id": "556818908c084d7b116780502252359800750450",
"size": "991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lnxproc/pidio.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2465"
},
{
"name": "Python",
"bytes": "111336"
},
{
"name": "Shell",
"bytes": "3691"
}
],
"symlink_target": ""
} |
import sys
import get_dictionary
# tokens is a list of tokens, so no need to split it again
def unkify(tokens, words_dict):
final = []
for token in tokens:
# only process the train singletons and unknown words
if len(token.rstrip()) == 0:
final.append('UNK')
elif not(token.rstrip() in words_dict):
numCaps = 0
hasDigit = False
hasDash = False
hasLower = False
for char in token.rstrip():
if char.isdigit():
hasDigit = True
elif char == '-':
hasDash = True
elif char.isalpha():
if char.islower():
hasLower = True
elif char.isupper():
numCaps += 1
result = 'UNK'
lower = token.rstrip().lower()
ch0 = token.rstrip()[0]
if ch0.isupper():
if numCaps == 1:
result = result + '-INITC'
if lower in words_dict:
result = result + '-KNOWNLC'
else:
result = result + '-CAPS'
elif not(ch0.isalpha()) and numCaps > 0:
result = result + '-CAPS'
elif hasLower:
result = result + '-LC'
if hasDigit:
result = result + '-NUM'
if hasDash:
result = result + '-DASH'
if lower[-1] == 's' and len(lower) >= 3:
ch2 = lower[-2]
if not(ch2 == 's') and not(ch2 == 'i') and not(ch2 == 'u'):
result = result + '-s'
elif len(lower) >= 5 and not(hasDash) and not(hasDigit and numCaps > 0):
if lower[-2:] == 'ed':
result = result + '-ed'
elif lower[-3:] == 'ing':
result = result + '-ing'
elif lower[-3:] == 'ion':
result = result + '-ion'
elif lower[-2:] == 'er':
result = result + '-er'
elif lower[-3:] == 'est':
result = result + '-est'
elif lower[-2:] == 'ly':
result = result + '-ly'
elif lower[-3:] == 'ity':
result = result + '-ity'
elif lower[-1] == 'y':
result = result + '-y'
elif lower[-2:] == 'al':
result = result + '-al'
final.append(result)
else:
final.append(token.rstrip())
return final
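# Illustration, assuming an empty training vocabulary so every token is
# unknown: unkify(['Remodeling', 'crazily', '1980s'], {}) returns
# ['UNK-INITC-ing', 'UNK-LC-ly', 'UNK-LC-NUM-s'].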
def is_next_open_bracket(line, start_idx):
for char in line[(start_idx + 1):]:
if char == '(':
return True
elif char == ')':
return False
raise IndexError('Bracket possibly not balanced, open bracket not followed by closed bracket')
def get_between_brackets(line, start_idx):
output = []
for char in line[(start_idx + 1):]:
if char == ')':
break
assert not(char == '(')
output.append(char)
return ''.join(output)
# start_idx = open bracket
#def skip_terminals(line, start_idx):
# line_end_idx = len(line) - 1
# for i in range(start_idx + 1, line_end_idx):
# if line[i] == ')':
# assert line[i + 1] == ' '
# return (i + 2)
# raise IndexError('No close bracket found in a terminal')
def get_tags_tokens_lowercase(line):
output = []
#print 'curr line', line_strip
line_strip = line.rstrip()
#print 'length of the sentence', len(line_strip)
for i in range(len(line_strip)):
if i == 0:
assert line_strip[i] == '('
if line_strip[i] == '(' and not(is_next_open_bracket(line_strip, i)): # fulfilling this condition means this is a terminal symbol
output.append(get_between_brackets(line_strip, i))
#print 'output:',output
output_tags = []
output_tokens = []
output_lowercase = []
for terminal in output:
terminal_split = terminal.split()
assert len(terminal_split) == 2 # each terminal contains a POS tag and word
output_tags.append(terminal_split[0])
output_tokens.append(terminal_split[1])
output_lowercase.append(terminal_split[1].lower())
return [output_tags, output_tokens, output_lowercase]
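# Example:
#   get_tags_tokens_lowercase('(S (NP (DT The) (NN cat)) (VP (VBZ sleeps)))')
#   -> [['DT', 'NN', 'VBZ'], ['The', 'cat', 'sleeps'], ['the', 'cat', 'sleeps']]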
def get_nonterminal(line, start_idx):
assert line[start_idx] == '(' # make sure it's an open bracket
output = []
for char in line[(start_idx + 1):]:
if char == ' ':
break
assert not(char == '(') and not(char == ')')
output.append(char)
return ''.join(output)
def get_actions(line):
output_actions = []
line_strip = line.rstrip()
i = 0
max_idx = (len(line_strip) - 1)
while i <= max_idx:
assert line_strip[i] == '(' or line_strip[i] == ')'
if line_strip[i] == '(':
if is_next_open_bracket(line_strip, i): # open non-terminal
curr_NT = get_nonterminal(line_strip, i)
output_actions.append('NT(' + curr_NT + ')')
i += 1
while line_strip[i] != '(': # get the next open bracket, which may be a terminal or another non-terminal
i += 1
else: # it's a terminal symbol
output_actions.append('SHIFT')
while line_strip[i] != ')':
i += 1
i += 1
while line_strip[i] != ')' and line_strip[i] != '(':
i += 1
else:
output_actions.append('REDUCE')
if i == max_idx:
break
i += 1
while line_strip[i] != ')' and line_strip[i] != '(':
i += 1
assert i == max_idx
return output_actions
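# Example: the same tree produces a top-down transition sequence:
#   get_actions('(S (NP (DT The) (NN cat)) (VP (VBZ sleeps)))')
#   -> ['NT(S)', 'NT(NP)', 'SHIFT', 'SHIFT', 'REDUCE',
#       'NT(VP)', 'SHIFT', 'REDUCE', 'REDUCE']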
def main():
if len(sys.argv) != 3:
        raise ValueError('Program takes exactly two arguments: train file and dev file (for vocabulary mapping purposes)')
train_file = open(sys.argv[1], 'r')
lines = train_file.readlines()
train_file.close()
dev_file = open(sys.argv[2], 'r')
dev_lines = dev_file.readlines()
dev_file.close()
words_list = get_dictionary.get_dict(lines)
line_ctr = 0
# get the oracle for the train file
for line in dev_lines:
line_ctr += 1
        # assert that the parentheses are balanced
        if line.count('(') != line.count(')'):
            raise ValueError('Unbalanced number of parentheses in line ' + str(line_ctr))
        # first line: the bracketed tree itself
print '# ' + line.rstrip()
tags, tokens, lowercase = get_tags_tokens_lowercase(line)
assert len(tags) == len(tokens)
assert len(tokens) == len(lowercase)
#print ' '.join(tags)
print ' '.join(tokens)
#print ' '.join(lowercase)
unkified = unkify(tokens, words_list)
print ' '.join(unkified)
output_actions = get_actions(line)
for action in output_actions:
print action
print ''
if __name__ == "__main__":
main()
| {
"content_hash": "6aed76fe18fa1fe6570a3aeebc4789d5",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 137,
"avg_line_length": 36.91836734693877,
"alnum_prop": 0.4906025428413488,
"repo_name": "clab/lstm-parser-with-beam-search",
"id": "6581800143b77156737b1da9fc1304eca5d4d5e4",
"size": "7236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_oracle_gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "130987"
},
{
"name": "CMake",
"bytes": "4207"
},
{
"name": "Perl",
"bytes": "52138"
},
{
"name": "Python",
"bytes": "18949"
},
{
"name": "Shell",
"bytes": "6827"
}
],
"symlink_target": ""
} |
"""
This module provides some helper functions to allow straightforward publishing
of messages in a one-shot manner. In other words, they are useful for the
situation where you have a single/multiple messages you want to publish to a
broker, then disconnect and nothing else is required.
"""
import paho.mqtt.client as mqtt
def _do_publish(c):
"""Internal function"""
m = c._userdata[0]
c._userdata = c._userdata[1:]
if type(m) is dict:
topic = m['topic']
try:
payload = m['payload']
except KeyError:
payload = None
try:
qos = m['qos']
except KeyError:
qos = 0
try:
retain = m['retain']
except KeyError:
retain = False
elif type(m) is tuple:
(topic, payload, qos, retain) = m
else:
raise ValueError('message must be a dict or a tuple')
c.publish(topic, payload, qos, retain)
def _on_connect(c, userdata, flags, rc):
"""Internal callback"""
_do_publish(c)
def _on_publish(c, userdata, mid):
"""Internal callback"""
if len(userdata) == 0:
c.disconnect()
else:
_do_publish(c)
def multiple(msgs, hostname="localhost", port=1883, client_id="", keepalive=60,
will=None, auth=None, tls=None, protocol=mqtt.MQTTv311):
"""Publish multiple messages to a broker, then disconnect cleanly.
This function creates an MQTT client, connects to a broker and publishes a
list of messages. Once the messages have been delivered, it disconnects
cleanly from the broker.
msgs : a list of messages to publish. Each message is either a dict or a
tuple.
If a dict, only the topic must be present. Default values will be
used for any missing arguments. The dict must be of the form:
msg = {'topic':"<topic>", 'payload':"<payload>", 'qos':<qos>,
'retain':<retain>}
topic must be present and may not be empty.
If payload is "", None or not present then a zero length payload
will be published.
If qos is not present, the default of 0 is used.
If retain is not present, the default of False is used.
If a tuple, then it must be of the form:
("<topic>", "<payload>", qos, retain)
hostname : a string containing the address of the broker to connect to.
Defaults to localhost.
port : the port to connect to the broker on. Defaults to 1883.
client_id : the MQTT client id to use. If "" or None, the Paho library will
generate a client id automatically.
keepalive : the keepalive timeout value for the client. Defaults to 60
seconds.
will : a dict containing will parameters for the client: will = {'topic':
"<topic>", 'payload':"<payload">, 'qos':<qos>, 'retain':<retain>}.
Topic is required, all other parameters are optional and will
default to None, 0 and False respectively.
Defaults to None, which indicates no will should be used.
auth : a dict containing authentication parameters for the client:
auth = {'username':"<username>", 'password':"<password>"}
Username is required, password is optional and will default to None
if not provided.
Defaults to None, which indicates no authentication is to be used.
tls : a dict containing TLS configuration parameters for the client:
dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
'ciphers':"<ciphers">}
ca_certs is required, all other parameters are optional and will
default to None if not provided, which results in the client using
the default behaviour - see the paho.mqtt.client documentation.
Defaults to None, which indicates that TLS should not be used.
"""
if type(msgs) is not list:
raise ValueError('msgs must be a list')
client = mqtt.Client(client_id=client_id,
userdata=msgs, protocol=protocol)
client.on_publish = _on_publish
client.on_connect = _on_connect
if auth is not None:
username = auth['username']
try:
password = auth['password']
except KeyError:
password = None
client.username_pw_set(username, password)
if will is not None:
will_topic = will['topic']
try:
will_payload = will['payload']
except KeyError:
will_payload = None
try:
will_qos = will['qos']
except KeyError:
will_qos = 0
try:
will_retain = will['retain']
except KeyError:
will_retain = False
client.will_set(will_topic, will_payload, will_qos, will_retain)
if tls is not None:
ca_certs = tls['ca_certs']
try:
certfile = tls['certfile']
except KeyError:
certfile = None
try:
keyfile = tls['keyfile']
except KeyError:
keyfile = None
try:
tls_version = tls['tls_version']
except KeyError:
tls_version = None
try:
ciphers = tls['ciphers']
except KeyError:
ciphers = None
client.tls_set(ca_certs, certfile, keyfile, tls_version=tls_version,
ciphers=ciphers)
client.connect(hostname, port, keepalive)
client.loop_forever()
def single(topic, payload=None, qos=0, retain=False, hostname="localhost",
port=1883, client_id="", keepalive=60, will=None, auth=None,
tls=None, protocol=mqtt.MQTTv311):
"""Publish a single message to a broker, then disconnect cleanly.
This function creates an MQTT client, connects to a broker and publishes a
single message. Once the message has been delivered, it disconnects cleanly
from the broker.
topic : the only required argument must be the topic string to which the
payload will be published.
payload : the payload to be published. If "" or None, a zero length payload
will be published.
qos : the qos to use when publishing, default to 0.
retain : set the message to be retained (True) or not (False).
hostname : a string containing the address of the broker to connect to.
Defaults to localhost.
port : the port to connect to the broker on. Defaults to 1883.
client_id : the MQTT client id to use. If "" or None, the Paho library will
generate a client id automatically.
keepalive : the keepalive timeout value for the client. Defaults to 60
seconds.
will : a dict containing will parameters for the client: will = {'topic':
"<topic>", 'payload':"<payload">, 'qos':<qos>, 'retain':<retain>}.
Topic is required, all other parameters are optional and will
default to None, 0 and False respectively.
Defaults to None, which indicates no will should be used.
auth : a dict containing authentication parameters for the client:
auth = {'username':"<username>", 'password':"<password>"}
Username is required, password is optional and will default to None
if not provided.
Defaults to None, which indicates no authentication is to be used.
tls : a dict containing TLS configuration parameters for the client:
dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
'ciphers':"<ciphers">}
ca_certs is required, all other parameters are optional and will
default to None if not provided, which results in the client using
the default behaviour - see the paho.mqtt.client documentation.
Defaults to None, which indicates that TLS should not be used.
"""
msg = {'topic':topic, 'payload':payload, 'qos':qos, 'retain':retain}
    multiple([msg], hostname, port, client_id, keepalive, will, auth, tls,
             protocol)
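# Example one-shot usage (the broker address is a placeholder):
#   single("paho/test/topic", payload="hello", hostname="mqtt.example.com")
#   multiple([{'topic': "paho/test/topic", 'payload': "a"},
#             ("paho/test/topic", "b", 1, False)],
#            hostname="mqtt.example.com")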
| {
"content_hash": "7d0a827d9c245c20276e0f88c3d7cd9e",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 79,
"avg_line_length": 40.22167487684729,
"alnum_prop": 0.6153092467850582,
"repo_name": "wendal/yeelink_tester",
"id": "029721b317ac8f58c3b15fbb9cdae327b9638242",
"size": "8705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paho/mqtt/publish.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "292344"
}
],
"symlink_target": ""
} |
import os
from pathlib import Path
from setuptools import setup, find_packages, Command
ROOTDIR = Path(__file__).parent
__version__ = None # Overwritten by executing version.py.
with open(ROOTDIR / "puncover/version.py") as f:
exec(f.read())
with open(ROOTDIR / "requirements-test.txt") as f:
tests_require = list(filter(lambda x: not x.strip().startswith('-r'), f.readlines()))
with open(ROOTDIR / "requirements.txt") as f:
requires = f.readlines()
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
# http://stackoverflow.com/a/3780822/196350
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system("rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info")
setup(
name="puncover",
version=__version__,
description="Analyses C/C++ build output for code size, static variables, and stack usage.",
long_description=open("README.rst").read(),
long_description_content_type="text/x-rst",
url="https://github.com/hbehrens/puncover",
download_url="https://github.com/hbehrens/puncover/tarball/%s" % __version__,
author="Heiko Behrens",
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
],
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
zip_safe=False,
entry_points={"console_scripts": ["puncover = puncover.puncover:main"]},
install_requires=requires,
tests_require=tests_require,
cmdclass={
"clean": CleanCommand,
},
# TODO: https://github.com/HBehrens/puncover/issues/36
# Fix Python 3.5
python_requires=">=3.6",
)
| {
"content_hash": "db9c163732e8ce5a4e2a2dab6c997766",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 96,
"avg_line_length": 31.319444444444443,
"alnum_prop": 0.6301552106430155,
"repo_name": "HBehrens/puncover",
"id": "d94c0f7831abb06e3c352e9ba6128818bced47cb",
"size": "2277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1767"
},
{
"name": "Jinja",
"bytes": "17098"
},
{
"name": "Python",
"bytes": "77320"
},
{
"name": "Shell",
"bytes": "913"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import copy
import errno
import gc
import logging
import os
import pprint
import socket
import sys
import traceback
import eventlet.backdoor
import greenlet
from oslo_config import cfg
from conveyor.common._i18n import _LI
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
def list_opts():
"""Entry point for oslo_config-generator.
"""
return [(None, copy.deepcopy(eventlet_backdoor_opts))]
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
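# e.g. _parse_port_range('0') -> (0, 0) (listen on a random port) and
# _parse_port_range('3000:3010') -> (3000, 3010); a reversed range such as
# '3010:3000' raises EventletBackdoorConfigValueError.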
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
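# e.g. _listen('localhost', 3000, 3010, eventlet.listen) tries each port in
# order and returns the first listener that binds; any error other than
# EADDRINUSE (or exhausting the range) propagates to the caller.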
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
| {
"content_hash": "29bf56f877177aaf0c60a92bdaf70331",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 77,
"avg_line_length": 30.659259259259258,
"alnum_prop": 0.6272046388016429,
"repo_name": "Hybrid-Cloud/conveyor",
"id": "bba19da1336cd6bc7fbfba021c495a58e69be882",
"size": "4850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conveyor/common/eventlet_backdoor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3789174"
},
{
"name": "Shell",
"bytes": "16567"
}
],
"symlink_target": ""
} |
"""
Design and implement methods of an LRU cache. There are two methods defined as follows:
get(x): Gets the value of the key x if the key exists in the cache, otherwise returns -1.
set(x,y): Sets the value for the key x, inserting it if not already present. If the cache is at
capacity, it invalidates the least recently used item before inserting the new one.
In the constructor of the class, the capacity of cache needs to be initialized.
"""
"""
Approach:
1. We will utilize two data structures to implement the LRU cache.
2. Firstly, we will need a Queue to store the actual data associated with each key. It will be implemented
as a doubly linked list. One important feature of the Queue will be that the most recently used items
will be near the front and the least recently used items will be near the rear end.
3. Secondly, we will use a Hashtable to store the keys. The values will be pointers to corresponding nodes
in the Queue.
4. After each get/set, the corresponding node is moved to the front of the queue and the matching
   Hashtable entry is updated. When maximum capacity is reached, we delete the last entry from the
   Queue and the corresponding entry from the Hashtable.
5. This will give O(1) time complexity for both get and set operations.
"""
class Node:
def __init__(self, key, data, after=None, before=None):
self.key = key
self.data = data
self.after = after
self.before = before
class DLLQueue:
def __init__(self):
self.front = None
self.rear = None
self.size = 0
def enqueue(self, key, value):
node = Node(key, value)
if self.size == 0:
self.front = node
self.rear = node
else:
self.front.before = node
node.after = self.front
self.front = node
self.size += 1
return node
def dequeue(self):
if self.size == 0:
return
key = self.rear.key
if self.size == 1:
self.front = None
self.rear = None
else:
self.rear = self.rear.before
self.rear.after = None
self.size -= 1
return key
def move_to_front(self, node):
if self.front is node:
return
if self.rear is node:
self.rear = self.rear.before
if node.after is not None:
node.after.before = node.before
node.before.after = node.after
self.front.before = node
node.after = self.front
node.before = None
self.front = node
class LRUCache:
def __init__(self, capacity):
self.capacity = capacity
self.table = {}
self.queue = DLLQueue()
def get(self, key):
if key not in self.table:
return -1
node = self.table[key]
self.queue.move_to_front(node)
return node.data
def is_full(self):
return self.capacity == self.queue.size
def is_empty(self):
return self.queue.size == 0
def set(self, key, value):
if key not in self.table:
if self.is_full():
old = self.queue.dequeue()
del self.table[old]
self.table[key] = self.queue.enqueue(key, value)
else:
node = self.table[key]
self.queue.move_to_front(node)
node.data = value
def size(self):
return self.queue.size
if __name__ == '__main__':
cache = LRUCache(2)
cache.set(1, 2)
cache.set(2, 3)
cache.set(1, 5)
cache.set(4, 5)
cache.set(6, 7)
print cache.get(4)
print cache.get(1)
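    # Expected output: 5 (key 4 is still cached) followed by -1 (key 1 was
    # evicted when 6 was inserted into the full cache).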
| {
"content_hash": "49e735b4179d8e87c0b9b4bb38d0a1e7",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 111,
"avg_line_length": 30.198347107438018,
"alnum_prop": 0.6023535851122058,
"repo_name": "prathamtandon/g4gproblems",
"id": "9c4d8f3394dcf16fba2997bccbac90432fdea71e",
"size": "3654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Data Structures/LRU_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "328776"
}
],
"symlink_target": ""
} |
from unittest import mock
from django.test import TestCase
from p3.models import TICKET_CONFERENCE_SHIRT_SIZES, TICKET_CONFERENCE_DIETS
from p3.stats import shirt_sizes, diet_types
from . import factories
class StatsTestCase(TestCase):
def setUp(self):
self.conference = factories.ConferenceFactory()
self.user = factories.UserFactory()
self.assopy_user = self.user.assopy_user
def test_creation_option(self):
from p3.stats import _create_option
total_qs = mock.Mock()
total_qs.count.return_value = 1
output = _create_option('id', 'title', total_qs)
self.assertDictEqual(output, {
'id': 'id',
'title': 'title',
'total': 1,
})
def test_tickets(self):
from p3.stats import _tickets
tickets = _tickets(self.conference)
def test_assigned_tickets(self):
from p3.stats import _assigned_tickets
tickets = _assigned_tickets(self.conference)
def test_unassigned_tickets(self):
from p3.stats import _unassigned_tickets
tickets = _unassigned_tickets(self.conference)
@mock.patch('email_template.utils.email')
@mock.patch('django.core.mail.send_mail')
def test_shirt_sizes(self, mock_send_email, mock_email):
fare = factories.FareFactory(conference=self.conference.code, ticket_type='conference')
ticket = factories.TicketFactory(fare=fare, user=self.user, frozen=False)
ticket_conference = factories.TicketConferenceFactory(ticket=ticket, assigned_to=self.user.email)
order = factories.CreditCardOrderFactory(user=self.assopy_user)
vat = factories.VatFactory()
order._complete = True
order.save()
_ = factories.OrderItemFactory(order=order, ticket=ticket, price=1, vat=vat)
repartition = shirt_sizes(self.conference)
assert repartition[0] == {
'total': 1,
'title': dict(TICKET_CONFERENCE_SHIRT_SIZES)[ticket_conference.shirt_size],
}
@mock.patch('email_template.utils.email')
@mock.patch('django.core.mail.send_mail')
def test_diet_types(self, mock_send_email, mock_email):
fare = factories.FareFactory(conference=self.conference.code, ticket_type='conference')
ticket = factories.TicketFactory(fare=fare, user=self.user, frozen=False)
ticket_conference = factories.TicketConferenceFactory(ticket=ticket, assigned_to=self.user.email)
order = factories.CreditCardOrderFactory(user=self.assopy_user)
vat = factories.VatFactory()
order._complete = True
order.save()
_ = factories.OrderItemFactory(order=order, ticket=ticket, price=1, vat=vat)
repartition = diet_types(self.conference)
assert repartition[0] == {
'total': 1,
'title': dict(TICKET_CONFERENCE_DIETS)[ticket_conference.diet],
}
def test_presence_days(self):
from p3.stats import presence_days
repartition = presence_days(self.conference)
def test_tickets_status(self):
from p3.stats import tickets_status
repartition = tickets_status(self.conference)
def test_speaker_status(self):
from p3.stats import speaker_status
repartition = speaker_status(self.conference)
def test_conference_speakers(self):
from p3.stats import conference_speakers
repartition = conference_speakers(self.conference)
def test_conference_speakers_day(self):
from p3.stats import conference_speakers_day
repartition = conference_speakers_day(self.conference)
def test_pp_tickets(self):
from p3.stats import pp_tickets
repartition = pp_tickets(self.conference)
| {
"content_hash": "bf36176b5fb2450115375bb2f5d98076",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 105,
"avg_line_length": 37.0990099009901,
"alnum_prop": 0.6688017080330931,
"repo_name": "EuroPython/epcon",
"id": "b6b6efeeb8e8c09ea4a349165202c76ca76bc428",
"size": "3747",
"binary": false,
"copies": "1",
"ref": "refs/heads/ep2021",
"path": "tests/test_stats.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "6475"
},
{
"name": "Dockerfile",
"bytes": "609"
},
{
"name": "HTML",
"bytes": "412025"
},
{
"name": "JavaScript",
"bytes": "421281"
},
{
"name": "Makefile",
"bytes": "4679"
},
{
"name": "Python",
"bytes": "991334"
},
{
"name": "Shell",
"bytes": "1182"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import json
import os
try:
import koji
except ImportError:
import inspect
import sys
# Find out mocked koji module
import tests.koji as koji
mock_koji_path = os.path.dirname(inspect.getfile(koji.ClientSession))
if mock_koji_path not in sys.path:
sys.path.append(os.path.dirname(mock_koji_path))
# Now load it properly, the same way the plugin will
del koji
import koji
from atomic_reactor.core import DockerTasker
from atomic_reactor.plugins.exit_koji_promote import (KojiUploadLogger,
KojiPromotePlugin)
from atomic_reactor.plugins.post_rpmqa import PostBuildRPMqaPlugin
from atomic_reactor.plugins.pre_check_and_set_rebuild import CheckAndSetRebuildPlugin
from atomic_reactor.plugins.pre_add_filesystem import AddFilesystemPlugin
from atomic_reactor.plugin import ExitPluginsRunner, PluginFailedException
from atomic_reactor.inner import DockerBuildWorkflow, TagConf, PushConf
from atomic_reactor.util import ImageName
from atomic_reactor.source import GitSource, PathSource
from tests.constants import SOURCE, MOCK
from flexmock import flexmock
import pytest
from tests.docker_mock import mock_docker
import subprocess
from osbs.api import OSBS
from osbs.exceptions import OsbsException
from six import string_types
NAMESPACE = 'mynamespace'
BUILD_ID = 'build-1'
class X(object):
pass
class MockedPodResponse(object):
def get_container_image_ids(self):
return {'buildroot:latest': '0123456'}
class MockedClientSession(object):
TAG_TASK_ID = 1234
DEST_TAG = 'images-candidate'
def __init__(self, hub, task_states=None):
self.uploaded_files = []
self.build_tags = {}
self.task_states = task_states or ['FREE', 'ASSIGNED', 'CLOSED']
self.task_states = list(self.task_states)
self.task_states.reverse()
self.tag_task_state = self.task_states.pop()
def krb_login(self, principal=None, keytab=None, proxyuser=None):
return True
def ssl_login(self, cert, ca, serverca, proxyuser=None):
return True
def logout(self):
pass
def uploadWrapper(self, localfile, path, name=None, callback=None,
blocksize=1048576, overwrite=True):
self.uploaded_files.append(path)
self.blocksize = blocksize
def CGImport(self, metadata, server_dir):
self.metadata = metadata
self.server_dir = server_dir
return {"id": "123"}
def getBuildTarget(self, target):
return {'dest_tag_name': self.DEST_TAG}
def tagBuild(self, tag, build, force=False, fromtag=None):
self.build_tags[build] = tag
return self.TAG_TASK_ID
def getTaskInfo(self, task_id, request=False):
assert task_id == self.TAG_TASK_ID
# For extra code coverage, imagine Koji denies the task ever
# existed.
if self.tag_task_state is None:
return None
return {'state': koji.TASK_STATES[self.tag_task_state]}
def taskFinished(self, task_id):
try:
self.tag_task_state = self.task_states.pop()
except IndexError:
# No more state changes
pass
return self.tag_task_state in ['CLOSED', 'FAILED', 'CANCELED', None]
FAKE_SIGMD5 = b'0' * 32
FAKE_RPM_OUTPUT = (
b'name1;1.0;1;x86_64;0;' + FAKE_SIGMD5 + b';(none);'
b'RSA/SHA256, Mon 29 Jun 2015 13:58:22 BST, Key ID abcdef01234567\n'
b'gpg-pubkey;01234567;01234567;(none);(none);(none);(none);(none)\n'
b'gpg-pubkey-doc;01234567;01234567;noarch;(none);' + FAKE_SIGMD5 +
b';(none);(none)\n'
b'name2;2.0;2;x86_64;0;' + FAKE_SIGMD5 + b';' +
b'RSA/SHA256, Mon 29 Jun 2015 13:58:22 BST, Key ID bcdef012345678;(none)\n'
b'\n')
FAKE_OS_OUTPUT = 'fedora-22'
def fake_subprocess_output(cmd):
if cmd.startswith('/bin/rpm'):
return FAKE_RPM_OUTPUT
elif 'os-release' in cmd:
return FAKE_OS_OUTPUT
else:
raise RuntimeError
class MockedPopen(object):
def __init__(self, cmd, *args, **kwargs):
self.cmd = cmd
def wait(self):
return 0
def communicate(self):
return (fake_subprocess_output(self.cmd), '')
def fake_Popen(cmd, *args, **kwargs):
return MockedPopen(cmd, *args, **kwargs)
def fake_digest(image):
tag = image.to_str(registry=False)
return 'sha256:{0:032x}'.format(len(tag))
def is_string_type(obj):
return any(isinstance(obj, strtype)
for strtype in string_types)
def mock_environment(tmpdir, session=None, name=None,
component=None, version=None, release=None,
source=None, build_process_failed=False,
is_rebuild=True, pulp_registries=0, blocksize=None,
task_states=None):
if session is None:
        session = MockedClientSession('', task_states=task_states)
if source is None:
source = GitSource('git', 'git://hostname/path')
if MOCK:
mock_docker()
tasker = DockerTasker()
workflow = DockerBuildWorkflow(SOURCE, "test-image")
base_image_id = '123456parent-id'
setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
setattr(workflow, 'builder', X())
setattr(workflow.builder, 'image_id', '123456imageid')
setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
setattr(workflow.builder, 'source', X())
setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
setattr(workflow.builder.source, 'dockerfile_path', None)
setattr(workflow.builder.source, 'path', None)
setattr(workflow, 'tag_conf', TagConf())
with open(os.path.join(str(tmpdir), 'Dockerfile'), 'wt') as df:
df.write('FROM base\n'
'LABEL BZComponent={component} com.redhat.component={component}\n'
'LABEL Version={version} version={version}\n'
'LABEL Release={release} release={release}\n'
.format(component=component, version=version, release=release))
setattr(workflow.builder, 'df_path', df.name)
if name and version:
workflow.tag_conf.add_unique_image('user/test-image:{v}-timestamp'
.format(v=version))
if name and version and release:
workflow.tag_conf.add_primary_images(["{0}:{1}-{2}".format(name,
version,
release),
"{0}:{1}".format(name, version),
"{0}:latest".format(name)])
flexmock(subprocess, Popen=fake_Popen)
flexmock(koji, ClientSession=lambda hub: session)
flexmock(GitSource)
(flexmock(OSBS)
.should_receive('get_build_logs')
.with_args(BUILD_ID)
.and_return('build logs'))
(flexmock(OSBS)
.should_receive('get_pod_for_build')
.with_args(BUILD_ID)
.and_return(MockedPodResponse()))
setattr(workflow, 'source', source)
setattr(workflow.source, 'lg', X())
setattr(workflow.source.lg, 'commit_id', '123456')
setattr(workflow, 'build_logs', ['docker build log\n'])
setattr(workflow, 'push_conf', PushConf())
docker_reg = workflow.push_conf.add_docker_registry('docker.example.com')
for image in workflow.tag_conf.images:
tag = image.to_str(registry=False)
docker_reg.digests[tag] = fake_digest(image)
for pulp_registry in range(pulp_registries):
workflow.push_conf.add_pulp_registry('env', 'pulp.example.com')
with open(os.path.join(str(tmpdir), 'image.tar.xz'), 'wt') as fp:
fp.write('x' * 2**12)
setattr(workflow, 'exported_image_sequence', [{'path': fp.name}])
setattr(workflow, 'build_failed', build_process_failed)
workflow.prebuild_results[CheckAndSetRebuildPlugin.key] = is_rebuild
workflow.postbuild_results[PostBuildRPMqaPlugin.key] = [
"name1,1.0,1,x86_64,0,2000," + FAKE_SIGMD5.decode() + ",23000",
"name2,2.0,1,x86_64,0,3000," + FAKE_SIGMD5.decode() + ",24000",
]
return tasker, workflow
@pytest.fixture
def os_env(monkeypatch):
monkeypatch.setenv('BUILD', json.dumps({
"metadata": {
"creationTimestamp": "2015-07-27T09:24:00Z",
"namespace": NAMESPACE,
"name": BUILD_ID,
}
}))
monkeypatch.setenv('OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE', 'buildroot:latest')
def create_runner(tasker, workflow, ssl_certs=False, principal=None,
keytab=None, metadata_only=False, blocksize=None,
target=None):
args = {
'kojihub': '',
'url': '/',
}
if ssl_certs:
args['koji_ssl_certs'] = '/'
if principal:
args['koji_principal'] = principal
if keytab:
args['koji_keytab'] = keytab
if metadata_only:
args['metadata_only'] = True
if blocksize:
args['blocksize'] = blocksize
if target:
args['target'] = target
args['poll_interval'] = 0
runner = ExitPluginsRunner(tasker, workflow,
[
{
'name': KojiPromotePlugin.key,
'args': args,
},
])
return runner
class TestKojiUploadLogger(object):
@pytest.mark.parametrize('totalsize', [0, 1024])
def test_with_zero(self, totalsize):
logger = flexmock()
logger.should_receive('debug').once()
upload_logger = KojiUploadLogger(logger)
upload_logger.callback(0, totalsize, 0, 0, 0)
@pytest.mark.parametrize(('totalsize', 'step', 'expected_times'), [
(10, 1, 11),
(12, 1, 7),
(12, 3, 5),
])
def test_with_defaults(self, totalsize, step, expected_times):
logger = flexmock()
logger.should_receive('debug').times(expected_times)
upload_logger = KojiUploadLogger(logger)
upload_logger.callback(0, totalsize, 0, 0, 0)
for offset in range(step, totalsize + step, step):
upload_logger.callback(offset, totalsize, step, 1.0, 1.0)
@pytest.mark.parametrize(('totalsize', 'step', 'notable', 'expected_times'), [
(10, 1, 10, 11),
(10, 1, 20, 6),
(10, 1, 25, 5),
(12, 3, 25, 5),
])
def test_with_notable(self, totalsize, step, notable, expected_times):
logger = flexmock()
logger.should_receive('debug').times(expected_times)
upload_logger = KojiUploadLogger(logger, notable_percent=notable)
for offset in range(0, totalsize + step, step):
upload_logger.callback(offset, totalsize, step, 1.0, 1.0)
class TestKojiPromote(object):
def test_koji_promote_failed_build(self, tmpdir, os_env):
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
session=session,
build_process_failed=True,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
runner.run()
# Must not have promoted this build
assert not hasattr(session, 'metadata')
def test_koji_promote_no_tagconf(self, tmpdir, os_env):
tasker, workflow = mock_environment(tmpdir)
runner = create_runner(tasker, workflow)
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_promote_no_build_env(self, tmpdir, monkeypatch, os_env):
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
# No BUILD environment variable
monkeypatch.delenv("BUILD", raising=False)
with pytest.raises(PluginFailedException) as exc:
runner.run()
assert "plugin 'koji_promote' raised an exception: KeyError" in str(exc)
def test_koji_promote_no_build_metadata(self, tmpdir, monkeypatch, os_env):
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
# No BUILD metadata
monkeypatch.setenv("BUILD", json.dumps({}))
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_promote_wrong_source_type(self, tmpdir, os_env):
source = PathSource('path', 'file:///dev/null')
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1',
source=source)
runner = create_runner(tasker, workflow)
with pytest.raises(PluginFailedException) as exc:
runner.run()
assert "plugin 'koji_promote' raised an exception: RuntimeError" in str(exc)
def test_koji_promote_log_task_id(self, tmpdir, monkeypatch, os_env,
caplog):
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
session=session,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
koji_task_id = '12345'
monkeypatch.setenv("BUILD", json.dumps({
'metadata': {
'creationTimestamp': '2015-07-27T09:24:00Z',
'namespace': NAMESPACE,
'name': BUILD_ID,
'labels': {
'koji-task-id': koji_task_id,
},
}
}))
runner.run()
assert "Koji Task ID {}".format(koji_task_id) in caplog.text()
metadata = session.metadata
assert 'build' in metadata
build = metadata['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'container_koji_task_id' in extra
extra_koji_task_id = extra['container_koji_task_id']
assert is_string_type(extra_koji_task_id)
assert extra_koji_task_id == koji_task_id
@pytest.mark.parametrize('params', [
{
'should_raise': False,
'principal': None,
'keytab': None,
},
{
'should_raise': False,
'principal': 'principal@EXAMPLE.COM',
'keytab': 'FILE:/var/run/secrets/mysecret',
},
{
'should_raise': True,
'principal': 'principal@EXAMPLE.COM',
'keytab': None,
},
{
'should_raise': True,
'principal': None,
'keytab': 'FILE:/var/run/secrets/mysecret',
},
])
def test_koji_promote_krb_args(self, tmpdir, params, os_env):
session = MockedClientSession('')
expectation = flexmock(session).should_receive('krb_login').and_return(True)
name = 'name'
version = '1.0'
release = '1'
tasker, workflow = mock_environment(tmpdir,
session=session,
name=name,
version=version,
release=release)
runner = create_runner(tasker, workflow,
principal=params['principal'],
keytab=params['keytab'])
if params['should_raise']:
expectation.never()
with pytest.raises(PluginFailedException):
runner.run()
else:
expectation.once()
runner.run()
def test_koji_promote_krb_fail(self, tmpdir, os_env):
session = MockedClientSession('')
(flexmock(session)
.should_receive('krb_login')
.and_raise(RuntimeError)
.once())
tasker, workflow = mock_environment(tmpdir,
session=session,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_promote_ssl_fail(self, tmpdir, os_env):
session = MockedClientSession('')
(flexmock(session)
.should_receive('ssl_login')
.and_raise(RuntimeError)
.once())
tasker, workflow = mock_environment(tmpdir,
session=session,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow, ssl_certs=True)
with pytest.raises(PluginFailedException):
runner.run()
@pytest.mark.parametrize('fail_method', [
'get_build_logs',
'get_pod_for_build',
])
def test_koji_promote_osbs_fail(self, tmpdir, os_env, fail_method):
tasker, workflow = mock_environment(tmpdir,
name='name',
version='1.0',
release='1')
(flexmock(OSBS)
.should_receive(fail_method)
.and_raise(OsbsException))
runner = create_runner(tasker, workflow)
runner.run()
@staticmethod
def check_components(components):
assert isinstance(components, list)
assert len(components) > 0
for component_rpm in components:
assert isinstance(component_rpm, dict)
assert set(component_rpm.keys()) == set([
'type',
'name',
'version',
'release',
'epoch',
'arch',
'sigmd5',
'signature',
])
assert component_rpm['type'] == 'rpm'
assert component_rpm['name']
assert is_string_type(component_rpm['name'])
assert component_rpm['name'] != 'gpg-pubkey'
assert component_rpm['version']
assert is_string_type(component_rpm['version'])
assert component_rpm['release']
epoch = component_rpm['epoch']
assert epoch is None or isinstance(epoch, int)
assert is_string_type(component_rpm['arch'])
assert component_rpm['signature'] != '(none)'
def validate_buildroot(self, buildroot):
assert isinstance(buildroot, dict)
assert set(buildroot.keys()) == set([
'id',
'host',
'content_generator',
'container',
'tools',
'components',
'extra',
])
host = buildroot['host']
assert isinstance(host, dict)
assert set(host.keys()) == set([
'os',
'arch',
])
assert host['os']
assert is_string_type(host['os'])
assert host['arch']
assert is_string_type(host['arch'])
assert host['arch'] != 'amd64'
content_generator = buildroot['content_generator']
assert isinstance(content_generator, dict)
assert set(content_generator.keys()) == set([
'name',
'version',
])
assert content_generator['name']
assert is_string_type(content_generator['name'])
assert content_generator['version']
assert is_string_type(content_generator['version'])
container = buildroot['container']
assert isinstance(container, dict)
assert set(container.keys()) == set([
'type',
'arch',
])
assert container['type'] == 'docker'
assert container['arch']
assert is_string_type(container['arch'])
assert isinstance(buildroot['tools'], list)
assert len(buildroot['tools']) > 0
for tool in buildroot['tools']:
assert isinstance(tool, dict)
assert set(tool.keys()) == set([
'name',
'version',
])
assert tool['name']
assert is_string_type(tool['name'])
assert tool['version']
assert is_string_type(tool['version'])
self.check_components(buildroot['components'])
extra = buildroot['extra']
assert isinstance(extra, dict)
assert set(extra.keys()) == set([
'osbs',
])
assert 'osbs' in extra
osbs = extra['osbs']
assert isinstance(osbs, dict)
assert set(osbs.keys()) == set([
'build_id',
'builder_image_id',
])
assert is_string_type(osbs['build_id'])
assert is_string_type(osbs['builder_image_id'])
def validate_output(self, output, metadata_only):
if metadata_only:
mdonly = set()
else:
mdonly = set(['metadata_only'])
assert isinstance(output, dict)
assert 'type' in output
assert 'buildroot_id' in output
assert 'filename' in output
assert output['filename']
assert is_string_type(output['filename'])
assert 'filesize' in output
assert int(output['filesize']) > 0 or metadata_only
assert 'arch' in output
assert output['arch']
assert is_string_type(output['arch'])
assert 'checksum' in output
assert output['checksum']
assert is_string_type(output['checksum'])
assert 'checksum_type' in output
assert output['checksum_type'] == 'md5'
assert is_string_type(output['checksum_type'])
assert 'type' in output
if output['type'] == 'log':
assert set(output.keys()) == set([
'buildroot_id',
'filename',
'filesize',
'arch',
'checksum',
'checksum_type',
'type',
'metadata_only', # only when True
]) - mdonly
assert output['arch'] == 'noarch'
else:
assert set(output.keys()) == set([
'buildroot_id',
'filename',
'filesize',
'arch',
'checksum',
'checksum_type',
'type',
'components',
'extra',
'metadata_only', # only when True
]) - mdonly
assert output['type'] == 'docker-image'
assert is_string_type(output['arch'])
assert output['arch'] != 'noarch'
assert output['arch'] in output['filename']
self.check_components(output['components'])
extra = output['extra']
assert isinstance(extra, dict)
assert set(extra.keys()) == set([
'image',
'docker',
])
image = extra['image']
assert isinstance(image, dict)
assert set(image.keys()) == set([
'arch',
])
assert image['arch'] == output['arch'] # what else?
assert 'docker' in extra
docker = extra['docker']
assert isinstance(docker, dict)
assert set(docker.keys()) == set([
'parent_id',
'id',
'repositories',
])
assert is_string_type(docker['parent_id'])
assert is_string_type(docker['id'])
repositories = docker['repositories']
assert isinstance(repositories, list)
repositories_digest = list(filter(lambda repo: '@sha256' in repo, repositories))
repositories_tag = list(filter(lambda repo: '@sha256' not in repo, repositories))
assert len(repositories_tag) == len(repositories_digest)
# check for duplicates
assert sorted(repositories_tag) == sorted(set(repositories_tag))
assert sorted(repositories_digest) == sorted(set(repositories_digest))
for repository in repositories_tag:
assert is_string_type(repository)
image = ImageName.parse(repository)
assert image.registry
assert image.namespace
assert image.repo
assert image.tag and image.tag != 'latest'
digest_pullspec = image.to_str(tag=False) + '@' + fake_digest(image)
assert digest_pullspec in repositories_digest
def test_koji_promote_import_fail(self, tmpdir, os_env, caplog):
session = MockedClientSession('')
(flexmock(session)
.should_receive('CGImport')
.and_raise(RuntimeError))
name = 'ns/name'
version = '1.0'
release = '1'
target = 'images-docker-candidate'
tasker, workflow = mock_environment(tmpdir,
name=name,
version=version,
release=release,
session=session)
runner = create_runner(tasker, workflow, target=target)
with pytest.raises(PluginFailedException):
runner.run()
assert 'metadata:' in caplog.text()
@pytest.mark.parametrize('task_states', [
['FREE', 'ASSIGNED', 'FAILED'],
['CANCELED'],
[None],
])
def test_koji_promote_tag_fail(self, tmpdir, task_states, os_env):
session = MockedClientSession('', task_states=task_states)
name = 'ns/name'
version = '1.0'
release = '1'
target = 'images-docker-candidate'
tasker, workflow = mock_environment(tmpdir,
name=name,
version=version,
release=release,
session=session)
runner = create_runner(tasker, workflow, target=target)
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_promote_filesystem_koji_task_id(self, tmpdir, os_env):
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1',
session=session)
task_id = 1234
workflow.prebuild_results[AddFilesystemPlugin.key] = {
'base-image-id': 'abcd',
'filesystem-koji-task-id': task_id,
}
runner = create_runner(tasker, workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'filesystem_koji_task_id' in extra
filesystem_koji_task_id = extra['filesystem_koji_task_id']
assert is_string_type(filesystem_koji_task_id)
assert filesystem_koji_task_id == str(task_id)
def test_koji_promote_filesystem_koji_task_id_missing(self, tmpdir, os_env,
caplog):
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1',
session=session)
task_id = 1234
workflow.prebuild_results[AddFilesystemPlugin.key] = {
'base-image-id': 'abcd',
}
runner = create_runner(tasker, workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'filesystem_koji_task_id' not in extra
assert AddFilesystemPlugin.key in caplog.text()
@pytest.mark.parametrize(('apis',
'pulp_registries',
'metadata_only',
'blocksize',
'target'), [
('v1-only',
1,
False,
None,
'images-docker-candidate'),
('v1+v2',
2,
False,
10485760,
None),
('v2-only',
1,
True,
None,
None),
])
def test_koji_promote_success(self, tmpdir, apis, pulp_registries,
metadata_only, blocksize, target, os_env):
session = MockedClientSession('')
component = 'component'
name = 'ns/name'
version = '1.0'
release = '1'
tasker, workflow = mock_environment(tmpdir,
session=session,
name=name,
component=component,
version=version,
release=release,
pulp_registries=pulp_registries,
blocksize=blocksize)
runner = create_runner(tasker, workflow, metadata_only=metadata_only,
blocksize=blocksize, target=target)
runner.run()
data = session.metadata
if metadata_only:
mdonly = set()
else:
mdonly = set(['metadata_only'])
output_filename = 'koji_promote-{0}.json'.format(apis)
with open(output_filename, 'w') as out:
json.dump(data, out, sort_keys=True, indent=4)
assert set(data.keys()) == set([
'metadata_version',
'build',
'buildroots',
'output',
])
assert data['metadata_version'] in ['0', 0]
build = data['build']
assert isinstance(build, dict)
buildroots = data['buildroots']
assert isinstance(buildroots, list)
assert len(buildroots) > 0
output_files = data['output']
assert isinstance(output_files, list)
assert set(build.keys()) == set([
'name',
'version',
'release',
'source',
'start_time',
'end_time',
'extra', # optional but always supplied
'metadata_only', # only when True
]) - mdonly
assert build['name'] == component
assert build['version'] == version
assert build['release'] == release
assert build['source'] == 'git://hostname/path#123456'
start_time = build['start_time']
assert isinstance(start_time, int) and start_time
end_time = build['end_time']
assert isinstance(end_time, int) and end_time
if metadata_only:
assert isinstance(build['metadata_only'], bool)
assert build['metadata_only']
extra = build['extra']
assert isinstance(extra, dict)
for buildroot in buildroots:
self.validate_buildroot(buildroot)
# Unique within buildroots in this metadata
assert len([b for b in buildroots
if b['id'] == buildroot['id']]) == 1
for output in output_files:
self.validate_output(output, metadata_only)
buildroot_id = output['buildroot_id']
# References one of the buildroots
assert len([buildroot for buildroot in buildroots
if buildroot['id'] == buildroot_id]) == 1
if metadata_only:
assert isinstance(output['metadata_only'], bool)
assert output['metadata_only']
files = session.uploaded_files
# There should be a file in the list for each output
# except for metadata-only imports, in which case there
# will be no upload for the image itself
assert isinstance(files, list)
expected_uploads = len(output_files)
if metadata_only:
expected_uploads -= 1
assert len(files) == expected_uploads
# The correct blocksize argument should have been used
if blocksize is not None:
assert blocksize == session.blocksize
build_id = runner.plugins_results[KojiPromotePlugin.key]
assert build_id == "123"
if target is not None:
assert session.build_tags[build_id] == session.DEST_TAG
assert session.tag_task_state == 'CLOSED'
def test_koji_promote_without_build_info(self, tmpdir, os_env):
class LegacyCGImport(MockedClientSession):
def CGImport(self, *args, **kwargs):
super(LegacyCGImport, self).CGImport(*args, **kwargs)
return
session = LegacyCGImport('')
name = 'ns/name'
version = '1.0'
release = '1'
tasker, workflow = mock_environment(tmpdir,
session=session,
name=name,
version=version,
release=release)
runner = create_runner(tasker, workflow)
runner.run()
assert runner.plugins_results[KojiPromotePlugin.key] is None
| {
"content_hash": "645fdbcca4e29f762fb5372cbe29766c",
"timestamp": "",
"source": "github",
"line_count": 991,
"max_line_length": 93,
"avg_line_length": 35.32088799192734,
"alnum_prop": 0.5270119704025369,
"repo_name": "jpopelka/atomic-reactor",
"id": "817b93686992242eda6bc7b428c7a448329acaf7",
"size": "35003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/test_koji_promote.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "570871"
},
{
"name": "Shell",
"bytes": "3589"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import serial
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
def animate(i):
    # Append the newest serial line, then re-read the whole file. 'a+' keeps
    # writes at the end of the file; the original 'r+' overwrote the first
    # bytes on every frame and then read from the wrong position.
    with open('meusdados.txt', 'a+') as arq:
        with serial.Serial('/dev/ttyACM0', 9600, timeout=1) as ser:
            arq.write(ser.readline())
        arq.seek(0)
        pullData = arq.read()
    dataArray = pullData.split('\n')
    xar = []
    yar = []
    for eachLine in dataArray:
        if len(eachLine) > 1:
            x, y = eachLine.split(',')
            xar.append(int(x))
            yar.append(int(y))
    ax1.clear()  # redraw from scratch so old frames do not pile up
    ax1.plot(xar, yar)
ani = animation.FuncAnimation(fig, animate, interval = 100)
plt.show()
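# Note: animate() assumes each serial line is a comma-separated "x,y" pair of
# integers, e.g. b"12,345\n". A quick offline check of that parsing (a sketch
# with made-up sample values, kept in comments so the script is unchanged):
#
#     sample = "0,512\n1,498\n2,530"
#     pairs = [tuple(map(int, l.split(','))) for l in sample.split('\n') if len(l) > 1]
#     # -> [(0, 512), (1, 498), (2, 530)]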
| {
"content_hash": "e3cfdc50e1f12f98c5401237d5d1c391",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 59,
"avg_line_length": 22.37037037037037,
"alnum_prop": 0.6788079470198676,
"repo_name": "italogfernandes/minicurso-arduino-avancado",
"id": "4f6255ec51a9f4220495c06f2639847bf95b24fc",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/DynamicPlot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1499"
},
{
"name": "C#",
"bytes": "9807"
},
{
"name": "Matlab",
"bytes": "293"
},
{
"name": "Python",
"bytes": "2761"
}
],
"symlink_target": ""
} |
"""Classes used for tracking game state.
The game state is defined as all the data which is being used by the game
itself. This excludes things like the package listing, the logging class, and
various other bits defined in the rpg.app.Game class.
"""
from enum import IntEnum, unique
from rpg.data import actor, resource, resources
import typing
if typing.TYPE_CHECKING:
from rpg.app import Game
from rpg.data.actor import NonPlayerCharacter
from rpg.data.resource import Dialog
from rpg.data.location import Location
from rpg.ui import views
from typing import Any, Dict, Optional
@unique
class GameState(IntEnum):
"""Enumeration which tracks the current state of the GameData class."""
Stopped = 0
Location = 1
Dialog = 2
Fight = 3
class GameData(object):
"""Collection of data which can be thought of as the games state.
The GameData class can be thought of as the game state for a particular
instance of the game. This is the object which can be loaded and saved to
files more or less completely to implement a save game feature.
"""
def __init__(self, game_object: 'Game') -> None:
"""Initialize the GameData instance
:param game_object: The Game instance holding this GameData instance
"""
self._game_object = game_object # type: Game
self._state = GameState.Stopped # type: GameState
self._add_loc_text = False # type: bool
self.player = actor.Player()
self.monster = None # type: Optional[NonPlayerCharacter]
self.location = None # type: Optional[Location]
self.fight = None # type: None
self.dialog = None # type: Optional[Dialog]
self.time = None # type: None
self.resources = resources.Resources()
self.variables = dict() # type: Dict[str, Any]
self.temp = dict() # type: Dict[str, Any]
def start(self) -> None:
"""Set the current location, fight, and dialog to None and then apply
all callbacks.
"""
self.location = None
self.fight = None
self.dialog = None
r_type = resource.ResourceType.Callback
for _, _, callback in self.resources.enumerate(r_type):
callback.apply(self._game_object)
if self.location is None:
self._game_object.log.error(
"No initial location set by start callbacks"
)
self.player.inventory.bind(self._game_object)
def set_location(self, location_id: str) -> None:
"""Attempt to change the current location to the location denoted by
the given resource_id.
:param location_id: The resource_id of the new location
"""
instance = self.resources.get(
resource.ResourceType.Location, location_id
)
if instance is None:
self._game_object.log.error(
"Could not find location {}", location_id
)
return
self._state = GameState.Location
self.location = typing.cast('Location', instance)
self._add_loc_text = True
self.location.start(self._game_object)
# Only display if the current state is GameState.Location and our
# instance is the correct instance
if self._state == GameState.Location and instance is self.location:
if self.location is not None:
self._display(self.location)
def set_dialog(self, dialog_id: str) -> None:
"""Attempt to set the current dialog to the one denoted by the given
resource id.
:param dialog_id: The resource id of the dialog to switch to
"""
instance = self.resources.get(resource.ResourceType.Dialog, dialog_id)
if instance is None:
self._game_object.log.error("Could not find dialog {}", dialog_id)
return
else:
self._state = GameState.Dialog
self.dialog = typing.cast('Dialog', instance)
self._display(self.dialog)
def set_fight(self, monster_id: str) -> None:
self._state = GameState.Fight
self.monster = self.resources.get(
resource.ResourceType.Actor, monster_id
)
view = self._game_object.stack.current()
if view.is_game_view():
view.fight_start(self.monster)
def stop_fight(self) -> None:
self._state = GameState.Location
view = self._game_object.stack.current()
if view.is_game_view():
view.fight_end()
self.resume_display()
def game_view(self) -> 'Optional[views.GameView]':
"""Get the GameView instance, assuming it is the current view.
:return: The current view as a GameView, assuming it implements the
GameView interface
"""
view = self._game_object.stack.current()
return view if view.is_game_view() else None
def state(self) -> GameState:
"""Get the GameState value representing the current state of the game.
:return: The current state of the game
"""
return self._state
def resume_display(self) -> None:
"""Force the current displayable to be redisplayed.
        Which displayable is shown depends on the current state of the game.
"""
if self._state == GameState.Location:
self._display(self.location)
elif self._state == GameState.Dialog:
self._display(self.dialog)
else:
self._game_object.log.error(
"GameData::resume_display(): Cannot resume for state {}",
self._state.name
)
def _display(self, displayable: 'resource.Displayable'):
"""Display the given Displayable resource if the current View is the
GameView.
:param displayable: The displayable to display
"""
view = self.game_view()
if view is not None:
view.display(displayable)
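# Minimal usage sketch (illustrative only: ``game`` stands for an rpg.app.Game
# instance and "town_square" for a hypothetical location resource id):
#
#     data = GameData(game)
#     data.start()                       # runs Callback resources; one must set a location
#     data.set_location("town_square")
#     assert data.state() == GameState.Location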
| {
"content_hash": "cb1d18ef20fe893ad7baf0cb2200992c",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 34.8150289017341,
"alnum_prop": 0.617632409098456,
"repo_name": "tvarney/txtrpg",
"id": "3b5485e0c56bd06f750eae35dc06b1f5c24ba40f",
"size": "6023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpg/state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "211457"
}
],
"symlink_target": ""
} |
"""
Configuration reader and defaults.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2011)
"""
import os
import configparser
from opennsa import constants as cnt
# defaults
DEFAULT_CONFIG_FILE = '/etc/opennsa.conf'
DEFAULT_LOG_FILE = '/var/log/opennsa.log'
DEFAULT_TLS = 'true'
DEFAULT_TOPOLOGY_FILE = '/usr/local/share/nsi/topology.owl'
DEFAULT_TCP_PORT = 9080
DEFAULT_TLS_PORT = 9443
DEFAULT_VERIFY = True
# This will work on most modern Linux distros
DEFAULT_CERTIFICATE_DIR = '/etc/ssl/certs'
# config blocks and options
BLOCK_SERVICE = 'service'
BLOCK_DUD = 'dud'
BLOCK_JUNIPER_EX = 'juniperex'
BLOCK_JUNIPER_VPLS = 'junipervpls'
BLOCK_FORCE10 = 'force10'
BLOCK_BROCADE = 'brocade'
BLOCK_NCSVPN = 'ncsvpn'
BLOCK_PICA8OVS = 'pica8ovs'
BLOCK_JUNOSMX = 'junosmx'
BLOCK_JUNOSEX = 'junosex'
BLOCK_JUNOSSPACE = 'junosspace'
BLOCK_OESS = 'oess'
BLOCK_CUSTOM_BACKEND = 'custombackend'
# service block
DOMAIN = 'domain' # mandatory
NETWORK_NAME = 'network' # legacy, used to be mandatory
LOG_FILE = 'logfile'
HOST = 'host'
PORT = 'port'
TLS = 'tls'
BASE_URL = 'base_url'
REST = 'rest'
NRM_MAP_FILE = 'nrmmap'
PEERS = 'peers'
POLICY = 'policy'
PLUGIN = 'plugin'
SERVICE_ID_START = 'serviceid_start'
# database
DATABASE = 'database' # mandatory
DATABASE_USER = 'dbuser' # mandatory
DATABASE_PASSWORD = 'dbpassword' # can be none (os auth)
DATABASE_HOST = 'dbhost' # can be none (local db)
# tls
KEY = 'key' # mandatory, if tls is set
CERTIFICATE = 'certificate' # mandatory, if tls is set
CERTIFICATE_DIR = 'certdir' # mandatory (but dir can be empty)
VERIFY_CERT = 'verify'
ALLOWED_HOSTS = 'allowedhosts' # comma-separated list
ALLOWED_ADMINS = 'allowed_admins' # list of requester NSA ids with administration-level access
# generic stuff
_SSH_HOST = 'host'
_SSH_PORT = 'port'
_SSH_HOST_FINGERPRINT = 'fingerprint'
_SSH_USER = 'user'
_SSH_PASSWORD = 'password'
_SSH_PUBLIC_KEY = 'publickey'
_SSH_PRIVATE_KEY = 'privatekey'
AS_NUMBER = 'asnumber'
# TODO: Don't do backend specifics for everything, it causes confusion, and doesn't really solve anything
# juniper block - same for mx / ex backends
JUNIPER_HOST = _SSH_HOST
JUNIPER_PORT = _SSH_PORT
JUNIPER_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
JUNIPER_USER = _SSH_USER
JUNIPER_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
JUNIPER_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
# force10 block
FORCE10_HOST = _SSH_HOST
FORCE10_PORT = _SSH_PORT
FORCE10_USER = _SSH_USER
FORCE10_PASSWORD = _SSH_PASSWORD
FORCE10_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
FORCE10_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
FORCE10_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
# Brocade block
BROCADE_HOST = _SSH_HOST
BROCADE_PORT = _SSH_PORT
BROCADE_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
BROCADE_USER = _SSH_USER
BROCADE_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
BROCADE_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
BROCADE_ENABLE_PASSWORD = 'enablepassword'
# Pica8 OVS
PICA8OVS_HOST = _SSH_HOST
PICA8OVS_PORT = _SSH_PORT
PICA8OVS_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
PICA8OVS_USER = _SSH_USER
PICA8OVS_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
PICA8OVS_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
PICA8OVS_DB_IP = 'dbip'
# NCS VPN Backend
NCS_SERVICES_URL = 'url'
NCS_USER = 'user'
NCS_PASSWORD = 'password'
# JUNOS block
JUNOS_HOST = _SSH_HOST
JUNOS_PORT = _SSH_PORT
JUNOS_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
JUNOS_USER = _SSH_USER
JUNOS_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
JUNOS_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
JUNOS_ROUTERS = 'routers'
# Junosspace backend
SPACE_USER = 'space_user'
SPACE_PASSWORD = 'space_password'
SPACE_API_URL = 'space_api_url'
SPACE_ROUTERS = 'routers'
SPACE_CONFIGLET_ACTIVATE_LOCAL = 'configlet_activate_local'
SPACE_CONFIGLET_ACTIVATE_REMOTE = 'configlet_activate_remote'
SPACE_CONFIGLET_DEACTIVATE_LOCAL = 'configlet_deactivate_local'
SPACE_CONFIGLET_DEACTIVATE_REMOTE = 'configlet_deactivate_remote'
# OESS
OESS_URL = 'url'
OESS_USER = 'username'
OESS_PASSWORD = 'password'
OESS_WORKGROUP = 'workgroup'
class ConfigurationError(Exception):
"""
Raised in case of invalid/inconsistent configuration.
"""
class Peer(object):
def __init__(self, url, cost):
self.url = url
self.cost = cost
class EnvInterpolation(configparser.BasicInterpolation):
"""Interpolation which expands environment variables in values."""
def before_get(self, parser, section, option, value, defaults):
value = super().before_get(parser, section, option, value, defaults)
return os.path.expandvars(value)
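# Sketch of what EnvInterpolation enables: option values may reference
# environment variables, expanded when the value is read (the variable name
# below is illustrative). Kept as comments so importing stays side-effect free:
#
#     os.environ['OPENNSA_DB_PASSWORD'] = 's3cret'
#     cp = configparser.ConfigParser(interpolation=EnvInterpolation())
#     cp.read_string('[service]\ndbpassword = ${OPENNSA_DB_PASSWORD}\n')
#     assert cp.get('service', 'dbpassword') == 's3cret'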
class Config(object):
"""
    Singleton configuration class. Loads the config once and persists it on the
    class object; also provides utility functions around the loaded configuration.
"""
_instance = None
def __init__(self):
raise RuntimeError("Call instance() instead, singleton class")
@classmethod
def instance(cls):
if cls._instance is None:
print('Creating new instance')
cls._instance = cls.__new__(cls)
cls._instance.cfg = None
cls._instance.vc = None
# Put any initialization here.
return cls._instance
def read_config(self, filename):
"""
Load the configuration from a given file
"""
if self._instance.cfg is None:
cfg = configparser.ConfigParser(interpolation=EnvInterpolation())
cfg.add_section(BLOCK_SERVICE)
cfg.read([filename])
self._instance.cfg = cfg
return self._instance.cfg, self._read_verify_config()
def _read_verify_config(self):
"""
Returns a dictionary of the loaded config once verified
"""
if self._instance.vc is None:
self._instance.vc = self._load_config_dict()
return self._instance.vc
def config_dict(self):
"""
Returns the loaded dict if one exists, or an empty one otherwise.
"""
return self._instance.vc if self._instance.vc is not None else {}
@property
def allowed_admins(self):
"""
Property returns array of allowed admins
"""
return self.config_dict().get(ALLOWED_ADMINS, '')
    def is_admin_override(self, urn):
        """
        Check whether the URN matches a configured admin, allowing all queries
        to execute.
        """
        return urn in self.allowed_admins
def _load_database_config(self, vc):
# vc = self._instance.vc
cfg = self._instance.cfg
# database
try:
vc[DATABASE] = cfg.get(BLOCK_SERVICE, DATABASE)
except configparser.NoOptionError:
raise ConfigurationError(
'No database specified in configuration file (mandatory)')
try:
vc[DATABASE_USER] = cfg.get(BLOCK_SERVICE, DATABASE_USER)
except configparser.NoOptionError:
raise ConfigurationError(
'No database user specified in configuration file (mandatory)')
vc[DATABASE_PASSWORD] = cfg.get(BLOCK_SERVICE, DATABASE_PASSWORD, fallback=None)
vc[DATABASE_HOST] = cfg.get(BLOCK_SERVICE, DATABASE_HOST, fallback='localhost')
vc[SERVICE_ID_START] = cfg.get(BLOCK_SERVICE, SERVICE_ID_START, fallback=None)
def _load_config_dict(self) -> dict:
"""
Read a config and verify that things are correct. Will also fill in
default values where applicable.
This is supposed to be used during application creation (before service
        start) to ensure that simple configuration errors do not pop up after
daemonization.
Returns a "verified" config, which is a dictionary.
"""
cfg = self._instance.cfg
vc = {}
# Check for deprecated / old invalid stuff
try:
cfg.get(BLOCK_SERVICE, NRM_MAP_FILE)
raise ConfigurationError(
'NRM Map file should be specified under backend')
except configparser.NoOptionError:
pass
# check / extract
try:
vc[DOMAIN] = cfg.get(BLOCK_SERVICE, DOMAIN)
except configparser.NoOptionError:
raise ConfigurationError(
'No domain name specified in configuration file (mandatory, see docs/migration)')
try:
cfg.get(BLOCK_SERVICE, NETWORK_NAME)
raise ConfigurationError(
'Network name no longer used, use domain (see docs/migration)')
except configparser.NoOptionError:
pass
vc[LOG_FILE] = cfg.get(BLOCK_SERVICE, LOG_FILE, fallback=DEFAULT_LOG_FILE)
try:
nrm_map_file = cfg.get(BLOCK_SERVICE, NRM_MAP_FILE)
if not os.path.exists(nrm_map_file):
raise ConfigurationError(
'Specified NRM mapping file does not exist (%s)' % nrm_map_file)
vc[NRM_MAP_FILE] = nrm_map_file
except configparser.NoOptionError:
vc[NRM_MAP_FILE] = None
vc[REST] = cfg.getboolean(BLOCK_SERVICE, REST, fallback=False)
try:
peers_raw = cfg.get(BLOCK_SERVICE, PEERS)
vc[PEERS] = [Peer(purl.strip(), 1) for purl in peers_raw.split('\n')]
except configparser.NoOptionError:
vc[PEERS] = None
vc[HOST] = cfg.get(BLOCK_SERVICE, HOST, fallback=None)
vc[TLS] = cfg.getboolean(BLOCK_SERVICE, TLS, fallback=DEFAULT_TLS)
vc[PORT] = cfg.getint(BLOCK_SERVICE, PORT, fallback=DEFAULT_TLS_PORT if vc[TLS] else DEFAULT_TCP_PORT)
try:
vc[BASE_URL] = cfg.get(BLOCK_SERVICE, BASE_URL)
except configparser.NoOptionError:
vc[BASE_URL] = None
try:
vc[KEY] = cfg.get(BLOCK_SERVICE, KEY)
except configparser.NoOptionError:
vc[KEY] = None
try:
vc[CERTIFICATE] = cfg.get(BLOCK_SERVICE, CERTIFICATE)
except configparser.NoOptionError:
vc[CERTIFICATE] = None
try:
policies = cfg.get(BLOCK_SERVICE, POLICY).split(',')
for policy in policies:
                if policy not in (cnt.REQUIRE_USER, cnt.REQUIRE_TRACE, cnt.AGGREGATOR, cnt.ALLOW_HAIRPIN):
raise ConfigurationError('Invalid policy: %s' % policy)
vc[POLICY] = policies
except configparser.NoOptionError:
vc[POLICY] = []
vc[PLUGIN] = cfg.get(BLOCK_SERVICE, PLUGIN, fallback=None)
self._load_database_config(vc)
self._load_certificates(vc)
        # Set override of allowed admins
allowed_hosts_admins = cfg.get(BLOCK_SERVICE, ALLOWED_ADMINS, fallback='')
vc[ALLOWED_ADMINS] = [i.strip() for i in allowed_hosts_admins.split(',') if len(i) > 0]
# backends
self._load_backends(vc)
return vc
def _load_certificates(self, vc):
cfg = self._instance.cfg
# we always extract certdir and verify as we need that for performing https requests
try:
certdir = cfg.get(BLOCK_SERVICE, CERTIFICATE_DIR)
if not os.path.exists(certdir):
raise ConfigurationError(
'Specified certdir does not exist (%s)' % certdir)
vc[CERTIFICATE_DIR] = certdir
except configparser.NoOptionError:
vc[CERTIFICATE_DIR] = DEFAULT_CERTIFICATE_DIR
try:
vc[VERIFY_CERT] = cfg.getboolean(BLOCK_SERVICE, VERIFY_CERT)
except configparser.NoOptionError:
vc[VERIFY_CERT] = DEFAULT_VERIFY
# tls
if vc[TLS]:
try:
if not vc[KEY]:
raise ConfigurationError(
'must specify a key when TLS is enabled')
elif not os.path.exists(vc[KEY]):
raise ConfigurationError(
'Specified key does not exist (%s)' % vc[KEY])
if not vc[CERTIFICATE]:
raise ConfigurationError(
'must specify a certificate when TLS is enabled')
elif not os.path.exists(vc[CERTIFICATE]):
raise ConfigurationError(
'Specified certificate does not exist (%s)' % vc[CERTIFICATE])
try:
allowed_hosts_cfg = cfg.get(BLOCK_SERVICE, ALLOWED_HOSTS)
vc[ALLOWED_HOSTS] = [i.strip() for i in allowed_hosts_cfg.split(',') if len(i) > 0]
                except configparser.NoOptionError:
pass
except configparser.NoOptionError as e:
# Not enough options for configuring tls context
raise ConfigurationError('Missing TLS option: %s' % str(e))
def _load_backends(self, vc):
"""
Verify and load backends into configuration class
"""
cfg = self._instance.cfg
backends = {}
for section in cfg.sections():
if section == 'service':
continue
if ':' in section:
                backend_type, name = section.split(':', 1)
else:
backend_type = section
name = ''
if name in backends:
raise ConfigurationError(
'Can only have one backend named "%s"' % name)
if backend_type in (
BLOCK_DUD, BLOCK_JUNIPER_EX, BLOCK_JUNIPER_VPLS, BLOCK_JUNOSMX, BLOCK_FORCE10, BLOCK_BROCADE,
BLOCK_NCSVPN, BLOCK_PICA8OVS, BLOCK_OESS, BLOCK_JUNOSSPACE, BLOCK_JUNOSEX,
BLOCK_CUSTOM_BACKEND, 'asyncfail'):
backend_conf = dict(cfg.items(section))
backend_conf['_backend_type'] = backend_type
backends[name] = backend_conf
vc['backend'] = backends
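# Example of a minimal configuration file this module accepts (values are
# illustrative; see the option constants above for the full key set):
#
#     [service]
#     domain = example.org
#     database = opennsa
#     dbuser = opennsa
#     dbpassword = ${OPENNSA_DB_PASSWORD}
#     tls = false
#
#     [dud]
#     nrmmap = /etc/opennsa.nrm
#
# Backend sections may be named with a colon ("dud:mybackend"); per the checks
# above, the NRM map file belongs under the backend block, not under [service].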
| {
"content_hash": "a84c9f34ca15c447c59ed07574cd93dd",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 113,
"avg_line_length": 32.615566037735846,
"alnum_prop": 0.6223877359172753,
"repo_name": "NORDUnet/opennsa",
"id": "90ba7e5a9a8e11853b366b057d4855305e81adfc",
"size": "13829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opennsa/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1165"
},
{
"name": "HTML",
"bytes": "1480"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "746817"
},
{
"name": "SCSS",
"bytes": "63"
},
{
"name": "Shell",
"bytes": "4132"
}
],
"symlink_target": ""
} |
from ...utils import validate_type
from ..base_1d import BaseEM1DSimulation
from .receivers import PointMagneticFieldSecondary, PointMagneticField
from .survey import Survey
import numpy as np
from geoana.kernels.tranverse_electric_reflections import rTE_forward, rTE_gradient
#######################################################################
# SIMULATION FOR A SINGLE SOUNDING
#######################################################################
class Simulation1DLayered(BaseEM1DSimulation):
"""
    Simulation class for simulating the frequency-domain EM (FEM) response over
    a 1D layered Earth for a single sounding.
"""
def __init__(self, survey=None, **kwargs):
super().__init__(survey=survey, **kwargs)
self._coefficients_set = False
@property
def survey(self):
"""The simulations survey.
Returns
-------
SimPEG.electromagnetics.frequency_domain.survey.Survey
"""
if self._survey is None:
raise AttributeError("Simulation must have a survey set")
return self._survey
@survey.setter
def survey(self, value):
if value is not None:
value = validate_type("survey", value, Survey, cast=False)
self._survey = value
def get_coefficients(self):
if self._coefficients_set is False:
self._compute_coefficients()
return (
self._i_freq,
self._lambs,
self._unique_lambs,
self._inv_lambs,
self._C0s,
self._C1s,
self._W,
)
def _set_coefficients(self, coefficients):
self._i_freq = coefficients[0]
self._lambs = coefficients[1]
self._unique_lambs = coefficients[2]
self._inv_lambs = coefficients[3]
self._C0s = coefficients[4]
self._C1s = coefficients[5]
self._W = coefficients[6]
self._coefficients_set = True
def _compute_coefficients(self):
if self._coefficients_set:
return
self._compute_hankel_coefficients()
survey = self.survey
# loop through source and receiver lists to create offsets
# get unique source-receiver offsets
frequencies = np.array(survey.frequencies)
# Compute coefficients for Hankel transform
i_freq = []
for i_src, src in enumerate(survey.source_list):
class_name = type(src).__name__
is_wire_loop = class_name == "LineCurrent"
i_f = np.searchsorted(frequencies, src.frequency)
for i_rx, rx in enumerate(src.receiver_list):
if is_wire_loop:
n_quad_points = src.n_segments * self.n_points_per_path
i_freq.append([i_f] * rx.locations.shape[0] * n_quad_points)
else:
i_freq.append([i_f] * rx.locations.shape[0])
self._i_freq = np.hstack(i_freq)
self._coefficients_set = True
def dpred(self, m, f=None):
"""
Return predicted data.
        Predicted data (`_pred`) are computed when
self.fields is called.
"""
if f is None:
f = self.fields(m)
return f
def fields(self, m):
"""
This method evaluates the Hankel transform for each source and
receiver and outputs it as a list. Used for computing response
or sensitivities.
"""
self._compute_coefficients()
self.model = m
C0s = self._C0s
C1s = self._C1s
lambs = self._lambs
W = self._W
frequencies = np.array(self.survey.frequencies)
unique_lambs = self._unique_lambs
i_freq = self._i_freq
inv_lambs = self._inv_lambs
sig = self.compute_complex_sigma(frequencies)
mu = self.compute_complex_mu(frequencies)
rTE = rTE_forward(frequencies, unique_lambs, sig, mu, self.thicknesses)
rTE = rTE[i_freq]
rTE = np.take_along_axis(rTE, inv_lambs, axis=1)
v = W @ ((C0s * rTE) @ self._fhtfilt.j0 + (C1s * rTE) @ self._fhtfilt.j1)
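        # Loosely, the line above evaluates for each source-receiver pair the
        # Hankel-transform integral
        #   v = \int_0^inf [C0(lam)*rTE(lam)*J0(lam*r) + C1(lam)*rTE(lam)*J1(lam*r)] dlam
        # via digital filtering: j0/j1 hold the filter weights and W gathers the
        # per-quadrature-point values onto the data locations (a sketch; the
        # exact kernels are assembled in _compute_hankel_coefficients).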
return self._project_to_data(v)
def getJ(self, m, f=None):
self.model = m
if getattr(self, "_J", None) is None:
self._J = {}
self._compute_coefficients()
C0s = self._C0s
C1s = self._C1s
lambs = self._lambs
frequencies = np.array(self.survey.frequencies)
unique_lambs = self._unique_lambs
i_freq = self._i_freq
inv_lambs = self._inv_lambs
W = self._W
sig = self.compute_complex_sigma(frequencies)
mu = self.compute_complex_mu(frequencies)
if self.hMap is not None:
# Grab a copy
C0s_dh = C0s.copy()
C1s_dh = C1s.copy()
h_vec = self.h
i = 0
for i_src, src in enumerate(self.survey.source_list):
class_name = type(src).__name__
is_wire_loop = class_name == "LineCurrent"
h = h_vec[i_src]
if is_wire_loop:
n_quad_points = src.n_segments * self.n_points_per_path
nD = sum(
rx.locations.shape[0] * n_quad_points
for rx in src.receiver_list
)
else:
nD = sum(rx.locations.shape[0] for rx in src.receiver_list)
ip1 = i + nD
v = np.exp(-lambs[i:ip1] * h)
C0s_dh[i:ip1] *= v * -lambs[i:ip1]
C1s_dh[i:ip1] *= v * -lambs[i:ip1]
i = ip1
            # J will be n_d * n_src (each source has its own h)...
rTE = rTE_forward(frequencies, unique_lambs, sig, mu, self.thicknesses)
rTE = rTE[i_freq]
rTE = np.take_along_axis(rTE, inv_lambs, axis=1)
v_dh_temp = (C0s_dh * rTE) @ self._fhtfilt.j0 + (
C1s_dh * rTE
) @ self._fhtfilt.j1
            v_dh_temp = W @ v_dh_temp  # project onto data locations, matching fields()
            # rearrange v_dh: it is currently (n_data x 1) but already contains
            # all the relevant information; map each entry from its receiver
            # index to the associated source index.
v_dh = np.zeros((self.survey.nSrc, v_dh_temp.shape[0]))
i = 0
for i_src, src in enumerate(self.survey.source_list):
class_name = type(src).__name__
is_wire_loop = class_name == "LineCurrent"
if is_wire_loop:
n_quad_points = src.n_segments * self.n_points_per_path
nD = sum(
rx.locations.shape[0] * n_quad_points
for rx in src.receiver_list
)
else:
nD = sum(rx.locations.shape[0] for rx in src.receiver_list)
ip1 = i + nD
v_dh[i_src, i:ip1] = v_dh_temp[i:ip1]
i = ip1
v_dh = v_dh.T
self._J["dh"] = self._project_to_data(v_dh)
if (
self.sigmaMap is not None
or self.muMap is not None
or self.thicknessesMap is not None
):
rTE_ds, rTE_dh, rTE_dmu = rTE_gradient(
frequencies, unique_lambs, sig, mu, self.thicknesses
)
if self.sigmaMap is not None:
rTE_ds = rTE_ds[:, i_freq]
rTE_ds = np.take_along_axis(rTE_ds, inv_lambs[None, ...], axis=-1)
v_ds = (
(
(C0s * rTE_ds) @ self._fhtfilt.j0
+ (C1s * rTE_ds) @ self._fhtfilt.j1
)
@ W.T
).T
self._J["ds"] = self._project_to_data(v_ds)
if self.muMap is not None:
rTE_dmu = rTE_dmu[:, i_freq]
rTE_dmu = np.take_along_axis(rTE_dmu, inv_lambs[None, ...], axis=-1)
v_dmu = (
(
(C0s * rTE_dmu) @ self._fhtfilt.j0
+ (C1s * rTE_dmu) @ self._fhtfilt.j1
)
@ W.T
).T
self._J["dmu"] = self._project_to_data(v_dmu)
if self.thicknessesMap is not None:
rTE_dh = rTE_dh[:, i_freq]
rTE_dh = np.take_along_axis(rTE_dh, inv_lambs[None, ...], axis=-1)
v_dthick = (
(
(C0s * rTE_dh) @ self._fhtfilt.j0
+ (C1s * rTE_dh) @ self._fhtfilt.j1
)
@ W.T
).T
self._J["dthick"] = self._project_to_data(v_dthick)
return self._J
def _project_to_data(self, v):
i_dat = 0
i_v = 0
if v.ndim == 1:
out = np.zeros(self.survey.nD)
else:
out = np.zeros((self.survey.nD, v.shape[1]))
for i_src, src in enumerate(self.survey.source_list):
class_name = type(src).__name__
is_wire_loop = class_name == "LineCurrent"
for i_rx, rx in enumerate(src.receiver_list):
i_dat_p1 = i_dat + rx.nD
i_v_p1 = i_v + rx.locations.shape[0]
v_slice = v[i_v:i_v_p1]
if isinstance(rx, PointMagneticFieldSecondary):
if rx.data_type == "ppm":
if is_wire_loop:
raise NotImplementedError(
"Primary field for LineCurrent has not been implemented"
)
if v_slice.ndim == 2:
v_slice /= src.hPrimary(self)[i_rx][:, None]
else:
v_slice /= src.hPrimary(self)[i_rx]
v_slice *= 1e6
elif isinstance(rx, PointMagneticField):
if is_wire_loop:
raise NotImplementedError(
"Primary field for LineCurrent has not been implemented"
)
if v_slice.ndim == 2:
                        # called on a sensitivity (2-D) slice; no primary field to add
                        pass
else:
v_slice += src.hPrimary(self)[i_rx]
if rx.component == "both":
out[i_dat:i_dat_p1:2] = v_slice.real
out[i_dat + 1 : i_dat_p1 : 2] = v_slice.imag
elif rx.component == "real":
out[i_dat:i_dat_p1] = v_slice.real
elif rx.component == "imag":
out[i_dat:i_dat_p1] = v_slice.imag
i_dat = i_dat_p1
i_v = i_v_p1
return out
| {
"content_hash": "167d6249bdc2850d1e847f46b0c53809",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 88,
"avg_line_length": 38.48979591836735,
"alnum_prop": 0.46005655708731,
"repo_name": "simpeg/simpeg",
"id": "f4d22f0ad3df84b19f77446e67ae1cfd3eb3a0af",
"size": "11316",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "SimPEG/electromagnetics/frequency_domain/simulation_1d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "3476002"
}
],
"symlink_target": ""
} |
import json
import nltk
from urllib.request import urlopen
import utils.prettylogger as pl
def word_links():
    # Download the dict.json word resource file
pl.plogger.okblue("Downloading word list...")
response = urlopen(
"https://raw.githubusercontent.com/adambom/dictionary/master/dictionary.json")
raw = response.read().decode('utf8')
pl.plogger.okgreen("Finished downloading word list")
pl.plogger.okblue("Creating entities...")
tokens = nltk.word_tokenize(raw)
text = nltk.Text(tokens)
tagged = nltk.pos_tag(text)
entities = nltk.chunk.ne_chunk(tagged)
pl.plogger.okgreen("Finished creating entities...")
return entities
def json_dictionary_loader():
with open('dict.json') as json_data:
d = json.load(json_data)
json_data.close()
return d
if __name__ == "__main__":
print("This ran correctly %s", word_links())
| {
"content_hash": "9b959c29c531d85d87c6edd406fdb3c1",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 86,
"avg_line_length": 31.676470588235293,
"alnum_prop": 0.6722376973073352,
"repo_name": "EngLang/EL-Python",
"id": "3907834ee7f698c2aa9d20229ee29f0113a57388",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "el_parser/dict_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "2559"
}
],
"symlink_target": ""
} |
import pytz
import sqlalchemy
from sqlalchemy import types
from sqlalchemy.dialects import sqlite
from backend.extensions import db
# alias common names
BigInteger = db.BigInteger().with_variant(sqlite.INTEGER(), 'sqlite') # type: sqlalchemy.types.BigInteger
Boolean = db.Boolean # type: sqlalchemy.types.Boolean
Date = db.Date # type: sqlalchemy.types.Date
Enum = db.Enum # type: sqlalchemy.types.Enum
Float = db.Float # type: sqlalchemy.types.Float
ForeignKey = db.ForeignKey # type: sqlalchemy.schema.ForeignKey
Integer = db.Integer # type: sqlalchemy.types.Integer
Interval = db.Interval # type: sqlalchemy.types.Interval
Numeric = db.Numeric # type: sqlalchemy.types.Numeric
SmallInteger = db.SmallInteger # type: sqlalchemy.types.SmallInteger
String = db.String # type: sqlalchemy.types.String
Text = db.Text # type: sqlalchemy.types.Text
Time = db.Time # type: sqlalchemy.types.Time
class DateTime(types.TypeDecorator):
impl = types.DateTime
def __init__(self, *args, **kwargs):
kwargs['timezone'] = True
super().__init__(*args, **kwargs)
def process_bind_param(self, value, dialect):
if value is not None:
if value.tzinfo is None:
raise ValueError('Cannot persist timezone-naive datetime')
return value.astimezone(pytz.UTC)
def process_result_value(self, value, dialect):
if value is not None:
return value.astimezone(pytz.UTC)
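# Usage sketch (model and column names are illustrative): columns declared with
# this type accept only timezone-aware datetimes and always come back as UTC.
#
#     from datetime import datetime
#
#     class Post(db.Model):
#         created_at = db.Column(DateTime, nullable=False)
#
#     Post(created_at=datetime.now(pytz.UTC))   # stored normalized to UTC
#     Post(created_at=datetime.utcnow())        # naive -> ValueError when persisted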
| {
"content_hash": "bbad89964e9efd4130cd31e49010eb8f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 106,
"avg_line_length": 38.78048780487805,
"alnum_prop": 0.6490566037735849,
"repo_name": "briancappello/flask-react-spa",
"id": "6ec3d82371a67a3dfb198fa95bece443cef88f35",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/database/types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8579"
},
{
"name": "Dockerfile",
"bytes": "1009"
},
{
"name": "HTML",
"bytes": "18126"
},
{
"name": "JavaScript",
"bytes": "169637"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Mako",
"bytes": "509"
},
{
"name": "Python",
"bytes": "282850"
},
{
"name": "Ruby",
"bytes": "5065"
},
{
"name": "Shell",
"bytes": "5231"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, List, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
location: str, publisher_name: str, offer: str, skus: str, version: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}",
) # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, "str"),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, "str"),
"offer": _SERIALIZER.url("offer", offer, "str"),
"skus": _SERIALIZER.url("skus", skus, "str"),
"version": _SERIALIZER.url("version", version, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(
location: str,
publisher_name: str,
offer: str,
skus: str,
subscription_id: str,
*,
expand: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions",
) # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, "str"),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, "str"),
"offer": _SERIALIZER.url("offer", offer, "str"),
"skus": _SERIALIZER.url("skus", skus, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if expand is not None:
_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
if top is not None:
_params["$top"] = _SERIALIZER.query("top", top, "int")
if orderby is not None:
_params["$orderby"] = _SERIALIZER.query("orderby", orderby, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_offers_request(location: str, publisher_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers",
) # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, "str"),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_publishers_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers"
) # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_skus_request(
location: str, publisher_name: str, offer: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus",
) # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, "str"),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, "str"),
"offer": _SERIALIZER.url("offer", offer, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class VirtualMachineImagesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2017_12_01.ComputeManagementClient`'s
:attr:`virtual_machine_images` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get(
self, location: str, publisher_name: str, offer: str, skus: str, version: str, **kwargs: Any
) -> _models.VirtualMachineImage:
"""Gets a virtual machine image.
:param location: The name of a supported Azure region. Required.
:type location: str
:param publisher_name: A valid image publisher. Required.
:type publisher_name: str
:param offer: A valid image publisher offer. Required.
:type offer: str
:param skus: A valid image SKU. Required.
:type skus: str
:param version: A valid image SKU version. Required.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineImage or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImage
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.VirtualMachineImage]
request = build_get_request(
location=location,
publisher_name=publisher_name,
offer=offer,
skus=skus,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("VirtualMachineImage", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}"} # type: ignore
@distributed_trace
def list(
self,
location: str,
publisher_name: str,
offer: str,
skus: str,
expand: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> List[_models.VirtualMachineImageResource]:
"""Gets a list of all virtual machine image versions for the specified location, publisher, offer,
and SKU.
:param location: The name of a supported Azure region. Required.
:type location: str
:param publisher_name: A valid image publisher. Required.
:type publisher_name: str
:param offer: A valid image publisher offer. Required.
:type offer: str
:param skus: A valid image SKU. Required.
:type skus: str
:param expand: The expand expression to apply on the operation. Default value is None.
:type expand: str
:param top: Default value is None.
:type top: int
:param orderby: Default value is None.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImageResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineImageResource]]
request = build_list_request(
location=location,
publisher_name=publisher_name,
offer=offer,
skus=skus,
subscription_id=self._config.subscription_id,
expand=expand,
top=top,
orderby=orderby,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("[VirtualMachineImageResource]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions"} # type: ignore
@distributed_trace
def list_offers(
self, location: str, publisher_name: str, **kwargs: Any
) -> List[_models.VirtualMachineImageResource]:
"""Gets a list of virtual machine image offers for the specified location and publisher.
:param location: The name of a supported Azure region. Required.
:type location: str
:param publisher_name: A valid image publisher. Required.
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImageResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineImageResource]]
request = build_list_offers_request(
location=location,
publisher_name=publisher_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_offers.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("[VirtualMachineImageResource]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_offers.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers"} # type: ignore
@distributed_trace
def list_publishers(self, location: str, **kwargs: Any) -> List[_models.VirtualMachineImageResource]:
"""Gets a list of virtual machine image publishers for the specified Azure location.
:param location: The name of a supported Azure region. Required.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImageResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineImageResource]]
request = build_list_publishers_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_publishers.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("[VirtualMachineImageResource]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_publishers.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers"} # type: ignore
@distributed_trace
def list_skus(
self, location: str, publisher_name: str, offer: str, **kwargs: Any
) -> List[_models.VirtualMachineImageResource]:
"""Gets a list of virtual machine image SKUs for the specified location, publisher, and offer.
:param location: The name of a supported Azure region. Required.
:type location: str
:param publisher_name: A valid image publisher. Required.
:type publisher_name: str
:param offer: A valid image publisher offer. Required.
:type offer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImageResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineImageResource]]
request = build_list_skus_request(
location=location,
publisher_name=publisher_name,
offer=offer,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_skus.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("[VirtualMachineImageResource]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_skus.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus"} # type: ignore
| {
"content_hash": "f0da5d340aa27bd60bb5a3ffec613eae",
"timestamp": "",
"source": "github",
"line_count": 547,
"max_line_length": 221,
"avg_line_length": 42.42961608775137,
"alnum_prop": 0.6468611314576242,
"repo_name": "Azure/azure-sdk-for-python",
"id": "08e1cc209a597b36f839592653f72bd08a1f0db5",
"size": "23709",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/operations/_virtual_machine_images_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |