text stringlengths 4 1.02M | meta dict |
|---|---|
from bson.objectid import ObjectId
import vacker.database
class MediaCollection(object):
    """Base class for a queryable collection of media items backed by MongoDB.

    Subclasses scope the collection by overriding ``_get_media_filter``
    (e.g. by year/month/day or by set id).
    """

    # When False, media documents flagged hide=True are excluded everywhere.
    _show_hidden = False

    def __init__(self, id):
        # ``id`` meaning is subclass-specific (date string, set ObjectId, ...).
        self._id = id
        self._initialise()

    def _initialise(self):
        """Hook for subclasses to perform extra setup; default is a no-op."""
        pass

    def show_hidden(self):
        """Include hidden media in all subsequent queries on this instance."""
        self._show_hidden = True

    def get_media_ids(self):
        """Return ids (as strings) of all media in this collection, oldest first."""
        db_connection = vacker.database.Database.get_database()
        res = db_connection.media.aggregate([{'$match': self._get_media_filter()}, {'$sort': {'datetime': 1}}])
        child_ids = [str(item['_id']) for item in res if item['_id'] is not None]
        return child_ids

    def get_id(self):
        return self._id

    def get_name(self):
        # NOTE(review): ``_name`` is only assigned by some subclasses (e.g.
        # SetCollection._initialise); others override get_name() instead.
        return self._name

    def get_details(self):
        """Return a summary dict of this collection (id, name, counts, states)."""
        return {
            'id': self.get_id(),
            'name': self.get_name(),
            'media_count': self.get_media_count(),
            'backup_state': self.get_backup_state(),
            'hidden_state': self.get_hidden_state()
        }

    def get_backup_state(self):
        """
        Determines the backup state of the media in the collection.
        Returns 0 - No media is backed up
                1 - Some media is backed up
                2 - All media is backed up
        """
        media_filter = self._get_media_filter()
        media_filter['backup'] = True
        db_connection = vacker.database.Database.get_database()
        # NOTE(review): Cursor.count() and Collection.update(..., multi=True)
        # used in this class were removed in modern pymongo; this code targets
        # an older driver version — confirm against the project's pins.
        backup_count = db_connection.media.find(media_filter).count()
        if backup_count == 0:
            return 0
        if backup_count < self.get_media_count():
            return 1
        return 2

    def toggle_backup(self):
        """Flip the backup flag on every media item in the collection.

        Returns False (and does nothing) when the collection is only
        partially backed up, since the intended direction is ambiguous.
        """
        current_backup_state = self.get_backup_state()
        if current_backup_state == 1:
            return False
        # All backed up -> clear the flag; none backed up -> set it.
        new_backup_state = False if (current_backup_state == 2) else True
        db_connection = vacker.database.Database.get_database()
        db_connection.media.update(self._get_media_filter(), {'$set': {'backup': new_backup_state}}, multi=True)
        return True

    def get_hidden_state(self):
        """
        Determines the hidden state of the media in the collection.
        Returns 0 - No media is hidden
                1 - Some media is hidden
                2 - All media is hidden
        """
        media_filter = self._get_media_filter()
        # Overrides any 'hide': False from the base filter, so this counts
        # all hidden media in scope regardless of _show_hidden.
        media_filter['hide'] = True
        db_connection = vacker.database.Database.get_database()
        hidden_count = db_connection.media.find(media_filter).count()
        # If none are hidden, return 0
        if hidden_count == 0:
            return 0
        # If all are hiden, return 2
        if hidden_count == self.get_media_count():
            return 2
        # If hidden has not been specified, return 0, as none shown
        # will be hidden
        if not self._show_hidden:
            return 0
        # Otherwise, if some are hidden (but hidden are shown), use this status
        if hidden_count < self.get_media_count():
            return 1

    def toggle_hide(self):
        """Flip the hide flag on every media item in the collection."""
        current_hidden_state = self.get_hidden_state()
        # Fully hidden -> unhide everything; otherwise hide everything.
        new_hidden_state = False if (current_hidden_state == 2) else True
        db_connection = vacker.database.Database.get_database()
        db_connection.media.update(self._get_media_filter(), {'$set': {'hide': new_hidden_state}}, multi=True)
        return True

    def _get_media_filter(self):
        # Base query: everything when showing hidden media, else only
        # documents explicitly flagged hide=False.
        if self._show_hidden:
            return {}
        else:
            return {'hide': False}

    def get_media_count(self):
        """Return the number of media items matched by this collection's filter."""
        db_connection = vacker.database.Database.get_database()
        return db_connection.media.find(self._get_media_filter()).count()

    def _get_child_ids(self, child_type, return_agg=None):
        """Group in-scope media by ``child_type``, sorted by earliest datetime.

        child_type: Mongo $group _id expression, e.g. '$set_id' or
            {'y': '$y', 'm': '$m'}.
        return_agg: for compound group keys, the list of key names to
            concatenate (each zero-padded to two digits) into one child id.
        """
        db_connection = vacker.database.Database.get_database()
        res = db_connection.media.aggregate([{'$match': self._get_media_filter()},
            {'$group': {'_id': child_type, 'datetime': {'$min': '$datetime'}}},
            {'$sort': {'datetime': 1}}
            # Get date by adding
            # , {'date': {'$min': '$datetime'}}
        ])
        child_ids = []
        for item in res:
            if return_agg:
                item_id = ''
                for id_key in return_agg:
                    # Zero-pad single-character components (e.g. month 3 -> '03').
                    item_id += str(item['_id'][id_key]) if len(str(item['_id'][id_key])) != 1 else '0%s' % str(item['_id'][id_key])
                child_ids.append(item_id)
            else:
                child_ids.append(str(item['_id']))
        return child_ids

    def get_child_sets(self):
        """Return ids of the sets that media in this collection belong to."""
        return self._get_child_ids('$set_id')

    def get_random_thumbnail(self):
        """Return the id of one randomly-sampled media item, or None when empty."""
        db_connection = vacker.database.Database.get_database()
        media = db_connection.media.aggregate([
            {'$match': self._get_media_filter()},
            {'$sample': {'size': 1}}
        ])
        # $sample yields at most one document; return its id if present.
        for media_itx in media:
            return str(media_itx['_id'])
        return None
class DateCollection(MediaCollection):
    """Media collection scoped by a date id of the form YYYY[MM[DD]]."""

    @staticmethod
    def getCollectionFromDateId(media_id):
        """Build the most specific collection type the date id allows."""
        _, month, day = DateCollection.convertDateId(media_id)
        if day:
            return DayCollection(media_id)
        if month:
            return MonthCollection(media_id)
        return YearCollection(media_id)

    @staticmethod
    def convertDateId(col_id):
        """Split a YYYY[MM[DD]] id into (year, month, day); missing parts are None."""
        text = str(col_id)
        parts = []
        for start, end in ((0, 4), (4, 6), (6, 8)):
            parts.append(int(text[start:end]) if len(text) >= end else None)
        return tuple(parts)

    def _initialise(self):
        # Cache the parsed date components from the collection id.
        self._year, self._month, self._day = DateCollection.convertDateId(self.get_id())

    def get_year(self):
        return self._year

    def get_month(self):
        return self._month

    def get_day(self):
        return self._day

    def get_child_months(self):
        """Return zero-padded 'YYYYMM' ids of months containing media."""
        return self._get_child_ids({'y': '$y', 'm': '$m'}, ['y', 'm'])

    def get_child_days(self):
        """Return zero-padded 'YYYYMMDD' ids of days containing media."""
        return self._get_child_ids({'y': '$y', 'm': '$m', 'd': '$d'}, ['y', 'm', 'd'])

    def get_child_events(self):
        """Return ids of events that media in this collection belong to."""
        return self._get_child_ids('$event_id')
class AllMedia(DateCollection):
    """Unscoped collection spanning every media item in the database."""

    def __init__(self):
        # Deliberately skips MediaCollection.__init__: there is no id to parse.
        pass

    def get_backup_media_paths(self, strip_path):
        """Return rsync-style include lines for every media item flagged for backup."""
        import re
        db_connection = vacker.database.Database.get_database()
        prefix_pattern = '^%s/?' % strip_path
        lines = [re.sub(prefix_pattern, '+ **/', doc['path'])
                 for doc in db_connection.media.find({'backup': True})]
        return "\n".join(lines)

    def get_years(self):
        """Return the years (as ids) that contain media."""
        return self._get_child_ids('$y')
class YearCollection(DateCollection):
    """Collection of all media taken in a single year."""

    def _get_media_filter(self):
        """Restrict the inherited filter to this collection's year."""
        query = super(YearCollection, self)._get_media_filter()
        query['y'] = self.get_year()
        return query

    def get_name(self):
        # The year id ('YYYY') doubles as the display name.
        return self.get_id()
class MonthCollection(DateCollection):
    """Collection of all media taken in a single month."""

    def get_name(self):
        """Return the abbreviated English month name for display."""
        names = ('Jan', 'Feb', 'March', 'Apr', 'May', 'June',
                 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec')
        return names[int(self.get_month()) - 1]

    def _get_media_filter(self):
        """Restrict the inherited filter to this collection's year and month."""
        query = super(MonthCollection, self)._get_media_filter()
        query.update({'y': self.get_year(), 'm': self.get_month()})
        return query
class DayCollection(DateCollection):
    """Collection of all media taken on a single day."""

    def get_name(self):
        """Return the day of month as an English ordinal ('1st', '2nd', '11th').

        Bug fix: the original suffixed purely on the last digit, producing
        '11st', '12nd' and '13rd' for the irregular teens.
        """
        day = self.get_day()
        if 11 <= (day % 100) <= 13:
            # 11, 12 and 13 are irregular: always 'th'.
            suffix = 'th'
        else:
            suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(day % 10, 'th')
        return '%s%s' % (day, suffix)

    def _get_media_filter(self):
        """Restrict the inherited filter to this year, month and day."""
        media_filter = super(DayCollection, self)._get_media_filter()
        media_filter['y'] = self.get_year()
        media_filter['m'] = self.get_month()
        media_filter['d'] = self.get_day()
        return media_filter
class SetCollection(MediaCollection):
    """Collection of the media belonging to one named set."""

    def _get_media_filter(self):
        """Restrict the inherited filter to this set's ObjectId."""
        media_filter = super(SetCollection, self)._get_media_filter()
        media_filter['set_id'] = ObjectId(self.get_id())
        return media_filter

    def _initialise(self):
        """Load the set document and cache its name; raise if the id is unknown."""
        db_connection = vacker.database.Database.get_database()
        res = db_connection.sets.find({'_id': ObjectId(self.get_id())})
        if not res.count():
            # Bug fix: the original format string had no %s placeholder
            # ('Set does not exist: ' % id), which raised TypeError instead
            # of the intended message.
            raise Exception('Set does not exist: %s' % self.get_id())
        self._name = res[0]['name'] or ''
| {
"content_hash": "661af119a0810089e188a907fecb4403",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 131,
"avg_line_length": 33.4863813229572,
"alnum_prop": 0.5539158726469905,
"repo_name": "MatthewJohn/vacker",
"id": "e0abafa544dcc7459e7694e04eaf7848277e366c",
"size": "8607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vacker/media_collection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "930"
},
{
"name": "Dockerfile",
"bytes": "308"
},
{
"name": "HTML",
"bytes": "1721"
},
{
"name": "JavaScript",
"bytes": "16247"
},
{
"name": "Python",
"bytes": "48187"
}
],
"symlink_target": ""
} |
# Skeleton configuration: copy this file and fill in your Reddit credentials.
REDDIT_USERNAME = '' # YOUR USERNAME as string
REDDIT_PASS = '' # YOUR PASSWORD as string
| {
"content_hash": "24100c18389253194e351c47fcfcc544",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 47,
"avg_line_length": 46,
"alnum_prop": 0.6956521739130435,
"repo_name": "smithi35/RedditBot",
"id": "67067ed0319f08925e1b27dba43e40faeb8ab389",
"size": "235",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Part2/config_skel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2360"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
} |
"""Calculate reproducibility coefficients for parametric maps. Input consists
of pmap scan pairs grouped together.
"""
import argparse
import glob
import os.path
import re

import numpy as np

import dwi.asciifile
import dwi.files
import dwi.plot
import dwi.stats
import dwi.util
def parse_args():
    """Parse command-line arguments."""
    p = argparse.ArgumentParser(description=__doc__)
    # Bug fix: default=0 so comparisons like ``args.verbose > 1`` work even
    # when -v is not given (action='count' otherwise leaves the value None,
    # and ``None > 1`` raises TypeError on Python 3).
    p.add_argument('-v', '--verbose', action='count', default=0,
                   help='be more verbose')
    p.add_argument('-p', '--patients',
                   help='patients file')
    p.add_argument('-b', '--nboot', type=int, default=2000,
                   help='number of bootstraps')
    p.add_argument('--voxel', default='0',
                   help='index of voxel to use, or mean or median')
    p.add_argument('-m', '--pmaps', nargs='+', required=True,
                   help='pmap files, pairs grouped together')
    p.add_argument('--figdir',
                   help='figure output directory')
    return p.parse_args()
def as_pairs(seq):
    """Return sequence split in two, each containing every second item."""
    if len(seq) % 2 != 0:
        raise ValueError('Sequence length not even: {}'.format(len(seq)))
    evens, odds = seq[::2], seq[1::2]
    return evens, odds
def plot(values, param, figdir):
    """Plot a parameter; its two baselines and their differences.

    This function was originally made in order to find outliers.
    values: flat sequence with the two baselines interleaved (see as_pairs).
    param: parameter name, used for plot titles and the output filename.
    figdir: directory the PNG is written to.
    """
    baselines = np.asarray(as_pairs(values))
    n = len(baselines[0])
    # NOTE(review): dwi.plot.generate_plots presumably yields one axes-like
    # object per subplot and saves the figure to ``path`` — verify in dwi.plot.
    it = dwi.plot.generate_plots(ncols=3, titles=(param,) * 3,
                                 xlabels=('index',) * 3,
                                 ylabels=('value', 'difference', 'value'),
                                 path='{}/{}.png'.format(figdir, param))
    for i, plt in enumerate(it):
        if i == 0:
            # Plot absolute values.
            x = range(2 * n)
            y = sorted(values)
            # Alternating marker colours so consecutive samples stand apart.
            c = ('lightgray', 'white') * n
            plt.scatter(x, y, c=c)
            plt.axis((min(x), max(x), min(y), max(y)))
        elif i == 1:
            # Plot differences.
            x = range(n)
            y = sorted(np.abs(baselines[0] - baselines[1]))
            plt.scatter(x, y, c='lightgray')
            plt.axis((min(x), max(x), min(y), max(y)))
        elif i == 2:
            # Plot sample pairs as bars.
            def key(pair):
                # Sort pairs by the absolute difference of their two values.
                a, b = pair
                return abs(a - b)
            pairs = baselines.T
            pairs = np.asarray(sorted(pairs, key=key))
            # Each pair becomes a bar spanning from its minimum to its maximum.
            left = range(n)
            bottom = np.min(pairs, axis=1)
            height = np.max(pairs, axis=1) - bottom
            plt.bar(left, height, bottom=bottom, color='lightgray')
            # bottom = range(n)
            # left = np.min(pairs, axis=1)
            # width = np.max(pairs, axis=1) - left
            # plt.barh(bottom, width, left=left, color='lightgray')
            plt.axis('tight')
def glob_if_needed(filenames):
    """Workaround for platforms without shell-level globbing."""
    if len(filenames) != 1:
        return filenames
    # A single argument may be an unexpanded pattern; fall back to it
    # verbatim when it matches nothing.
    matches = glob.glob(filenames[0])
    return matches if matches else filenames
def sort_pmapfiles(paths):
    """Kludge to sort input files for platforms where globbing leaves them
    unsorted. Requires certain format.
    """
    def sortkey(path):
        # Expects filenames shaped like 'case_scan_lesion.ext'.
        directory, filename = os.path.split(path)
        stem = os.path.splitext(filename)[0]
        case, scan, lesion = stem.split('_')
        return directory, case, lesion, scan
    return sorted(paths, key=sortkey)
def parse_filename(filename):
"""Parse input filename formatted as 'num_name_hB_[12][ab]_*'."""
# m = re.match(r'(\d+)_([\w_]+)_[^_]*_(\d\w)_', filename)
m = re.search(r'(\d+)_(\w*)_?(\d\w)_', filename)
if m is None:
raise ValueError('Cannot parse filename: {}'.format(filename))
num, name, scan = m.groups()
return int(num), name.lower(), scan.lower()
def scan_pairs(afs):
    """Check that the ascii files are correctly paired as scan baselines.

    Return list of (patient number, scan 1, scan 2) tuples.
    """
    def get_tuple(first, second):
        num_a, _, scan_a = parse_filename(first.basename)
        num_b, _, scan_b = parse_filename(second.basename)
        # Same patient and same baseline letter group required.
        if num_a != num_b or scan_a[0] != scan_b[0]:
            raise ValueError('Not a pair: {}, {}'.format(first.basename,
                                                         second.basename))
        return num_a, scan_a, scan_b
    firsts, seconds = as_pairs(afs)
    return [get_tuple(a, b) for a, b in zip(firsts, seconds)]
def scan_in_patients(patients, num, scan):
    """Is this scan listed in the patients sequence?"""
    for patient in patients:
        if patient.num == num and scan in patient.scans:
            return True
    return False
def load_files(patients, filenames, pairs=False):
    """Load pmap files. If pairs=True, require scan pairs together.

    Returns (pmaps, params): an array of the loaded maps and the parameter
    names taken from the first file.
    """
    def filt(filename):
        # Keep a file when no patient list is given, or its scan is listed.
        num, _, scan = parse_filename(os.path.basename(filename))
        return patients is None or scan_in_patients(patients, num, scan)
    pmapfiles = filter(filt, filenames)
    afs = [dwi.asciifile.AsciiFile(x) for x in pmapfiles]
    if pairs:
        # Raises ValueError when files are not grouped as baseline pairs.
        scan_pairs(afs)
    pmaps = [af.a for af in afs]
    pmaps = np.array(pmaps)
    # NOTE(review): raises IndexError when no file survived filtering.
    params = afs[0].params()
    assert pmaps.shape[-1] == len(params), 'Parameter name mismatch.'
    return pmaps, params
def select_voxel(pmaps, voxel):
    """Select voxel to use.

    voxel: 'mean', 'median', or the index of a single voxel as a string.
    """
    reducers = {'mean': np.mean, 'median': np.median}
    if voxel in reducers:
        # Collapse the voxel axis with the requested aggregate.
        return reducers[voxel](pmaps, axis=1)
    # Otherwise treat the argument as a single voxel index.
    return pmaps[:, int(voxel), :]
def get_results(baselines, nboot):
    """Compute repeatability statistics for a (baseline1, baseline2) pair.

    Returns a dict with the raw coefficients plus versions normalised by
    the average, the ICC, and (optionally bootstrapped) ICC interval.
    """
    d = dwi.stats.repeatability_coeff(*baselines, avgfun=np.median)
    # Normalise by the average value so parameters are comparable.
    d['msdr'] = d['msd'] / d['avg']
    d['cir'] = d['ci'] / d['avg']
    d['corr'] = d['cor'] / d['avg']
    d['icc'] = dwi.stats.icc(baselines)
    if nboot:
        t = dwi.stats.bootstrap_icc(baselines, nboot=nboot)
    else:
        # Bootstrapping disabled: fill with NaN placeholders.
        t = (np.nan,) * 3
    d['icc_bs'], d['icc_ci1'], d['icc_ci2'] = t
    return d
def main():
    """Read pmap pairs, compute reproducibility stats, print one line per param."""
    args = parse_args()
    if args.patients:
        patients = dwi.files.read_patients_file(args.patients)
    else:
        patients = None
    paths = glob_if_needed(args.pmaps)
    paths = sort_pmapfiles(paths)  # XXX: Temporary kludge.
    pmaps, params = load_files(patients, paths, pairs=True)
    X = select_voxel(pmaps, args.voxel)
    # Bug fix: args.verbose is None when -v was not given (action='count'
    # without a default), and ``None > 1`` raises TypeError; coalesce first.
    verbose = args.verbose or 0
    if verbose > 1:
        s = 'Samples: {}, features: {}, voxel: {}, bootstraps: {}'
        print(s.format(X.shape[0], X.shape[1], args.voxel, args.nboot))
    # Print results for each parameter.
    if verbose:
        print('# avg[lower-upper] '
              'msd/avg CI/avg wCV CoR/avg '
              'ICC bsICC[lower-upper] '
              'param')
    output = (
        '{avg:.8f}[{avg_ci1:.8f}-{avg_ci2:.8f}] '
        '{msdr:.4f} {cir:.4f} {wcv:.4f} {corr:.4f} '
        '{icc:5.2f} {icc_bs:5.2f}[{icc_ci1:5.2f}-{icc_ci2:5.2f}] '
        '{param}'
    )
    skipped_params = 'SI0N C RMSE'.split()
    for values, param in zip(X.T, params):
        if param in skipped_params:
            continue
        # Constant values carry no repeatability information; skip them.
        if dwi.util.all_equal(values):
            continue
        if args.figdir:
            plot(values, param, args.figdir)
        baselines = as_pairs(values)
        d = dict(get_results(baselines, args.nboot), param=param)
        print(output.format(**d))
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "171e4feb41a0ece3cecce74a45044b12",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 77,
"avg_line_length": 33.43362831858407,
"alnum_prop": 0.5579671784012705,
"repo_name": "jupito/dwilib",
"id": "4a5dd284b3a53520b323b7ba63f564a03bd4edc6",
"size": "7576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dwi/tools/reproducibility.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "341365"
},
{
"name": "Shell",
"bytes": "4383"
}
],
"symlink_target": ""
} |
from owslib.etree import etree
from owslib import crs, util
from owslib.util import testXMLValue, testXMLAttribute, nspath_eval, xmltag_split, dict_union, extract_xml_list
from owslib.namespaces import Namespaces
def get_namespaces():
    """Return the prefix -> URI namespace map used by the SensorML parser."""
    ns = Namespaces()
    mapping = ns.get_namespaces(["sml", "gml", "xlink"])
    # 'ism' is not in the shared owslib registry; add it by hand.
    mapping["ism"] = "urn:us:gov:ic:ism:v2"
    return mapping
# Module-level namespace map shared by all XPath helpers in this module.
namespaces = get_namespaces()
def nsp(path):
    """Expand namespace prefixes in ``path`` using the module namespace map."""
    return nspath_eval(path, namespaces)
class SensorML(object):
    """Root SensorML document: parses input and exposes its member processes."""

    def __init__(self, element):
        # Accept raw XML (str/bytes) or an already-parsed tree/element.
        if isinstance(element, (str, bytes)):
            self._root = etree.fromstring(element)
        else:
            self._root = element
        # Unwrap an ElementTree to its root Element.
        if hasattr(self._root, 'getroot'):
            self._root = self._root.getroot()
        self.members = [Member(m) for m in self._root.findall(nsp('sml:member'))]
class Member(object):
    """Factory: constructs the concrete process type held by an sml:member."""

    def __new__(cls, element):
        # Dispatch on the local tag name of the member's last child element.
        tag = element[-1].tag.split("}")[-1]
        dispatch = {
            "System": ("sml:System", System),
            "ProcessChain": ("sml:ProcessChain", ProcessChain),
            "ProcessModel": ("sml:ProcessModel", ProcessModel),
            "Component": ("sml:Component", Component),
        }
        if tag in dispatch:
            path, factory = dispatch[tag]
            return factory(element.find(nsp(path)))
        # Unknown member types yield None, just like falling off the
        # if/elif chain this table replaces.
        return None
class PropertyGroup(object):
    """Mixin parsing sml:capabilities and sml:characteristics entries."""

    def __init__(self, element):
        # Both capabilities and characteristics contain a single swe:DataRecord element
        self.capabilities = {}
        for node in element.findall(nsp('sml:capabilities')):
            name = testXMLAttribute(node, "name")
            if name is not None:
                self.capabilities[name] = node[0]
        self.characteristics = {}
        for node in element.findall(nsp('sml:characteristics')):
            name = testXMLAttribute(node, "name")
            if name is not None:
                self.characteristics[name] = node[0]

    def get_capabilities_by_name(self, name):
        """Return list of element by name, case insensitive"""
        wanted = name.lower()
        return [value for key, value in self.capabilities.items()
                if key.lower() == wanted]

    def get_characteristics_by_name(self, name):
        """Return list of element objects by name, case insensitive"""
        wanted = name.lower()
        return [value for key, value in self.characteristics.items()
                if key.lower() == wanted]
class ConstraintGroup(object):
    """Mixin parsing security, validity-time and legal constraints."""

    def __init__(self, element):
        # ism:SecurityAttributesOptionsGroup
        self.security = element.findall(nsp("sml:securityConstraint/sml:Security/ism:SecurityAttributesOptionGroup"))
        # gml:TimeInstant or gml:TimePeriod element
        self.validTime = element.find(nsp("sml:validTime"))
        self.rights = [Right(x) for x in element.findall(nsp("sml:legalConstraint/sml:Rights"))]
class Documentation(object):
    """An sml:documentation element: link attributes plus nested Documents."""

    def __init__(self, element):
        self.arcrole = testXMLAttribute(element, nsp("xlink:arcrole"))
        self.url = testXMLAttribute(element, nsp("xlink:href"))
        self.documents = [Document(d) for d in element.findall(nsp("sml:Document"))]
class Document(object):
    """A single sml:Document: metadata fields plus its online resource link."""

    def __init__(self, element):
        self.id = testXMLAttribute(element, nsp("gml:id"))
        self.version = testXMLValue(element.find(nsp("sml:version")))
        self.description = testXMLValue(element.find(nsp("gml:description")))
        self.date = testXMLValue(element.find(nsp("sml:date")))
        # A missing sml:contact makes Contact(None) raise AttributeError;
        # treat that as "no contact".
        try:
            self.contact = Contact(element.find(nsp("sml:contact")))
        except AttributeError:
            self.contact = None
        self.format = testXMLValue(element.find(nsp('sml:format')))
        self.url = testXMLAttribute(element.find(nsp('sml:onlineResource')), nsp('xlink:href'))
class Right(object):
    """A legal-constraint rights entry (sml:Rights)."""

    def __init__(self, element):
        self.id = testXMLAttribute(element, nsp('gml:id'))
        self.privacyAct = testXMLAttribute(element, nsp('sml:privacyAct'))
        self.intellectualPropertyRights = testXMLAttribute(element, nsp('sml:intellectualPropertyRights'))
        self.copyRights = testXMLAttribute(element, nsp('sml:copyRights'))
        self.documentation = [Documentation(x) for x in element.findall(nsp("sml:documentation"))]
class ReferenceGroup(object):
    """Mixin parsing contacts (keyed by xlink role) and documentation."""

    def __init__(self, element):
        self.contacts = {}
        for node in element.findall(nsp('sml:contact')):
            contact = Contact(node)
            self.contacts[contact.role] = contact
        self.documentation = [Documentation(d)
                              for d in element.findall(nsp("sml:documentation"))]

    def get_contacts_by_role(self, role):
        """Return a Contact by role, case insensitive"""
        wanted = role.lower()
        return [value for key, value in self.contacts.items()
                if key.lower() == wanted]
class GeneralInfoGroup(object):
    """Mixin parsing keywords, identifiers and classifiers."""

    def __init__(self, element):
        self.keywords = extract_xml_list(element.findall(nsp('sml:keywords/sml:KeywordList/sml:keyword')))
        self.identifiers = {}
        for node in element.findall(nsp('sml:identification/sml:IdentifierList/sml:identifier')):
            identifier = Identifier(node)
            self.identifiers[identifier.name] = identifier
        self.classifiers = {}
        for node in element.findall(nsp('sml:classification/sml:ClassifierList/sml:classifier')):
            classifier = Classifier(node)
            self.classifiers[classifier.name] = classifier

    def get_identifiers_by_name(self, name):
        """Return list of Identifier objects by name, case insensitive"""
        wanted = name.lower()
        return [value for key, value in self.identifiers.items()
                if key.lower() == wanted]

    def get_classifiers_by_name(self, name):
        """Return list of Classifier objects by name, case insensitive"""
        wanted = name.lower()
        return [value for key, value in self.classifiers.items()
                if key.lower() == wanted]
class Contact(object):
    """A contact entry extracted from an sml:contact element."""

    def __init__(self, element):
        # TODO: This only supports the sml:contact/sml:ResponsibleParty elements, but there are numerous ways to store
        # contact information here.
        self.role = testXMLAttribute(element, nsp("xlink:role"))
        self.href = testXMLAttribute(element, nsp("xlink:href"))
        self.organization = testXMLValue(element.find(nsp('sml:ResponsibleParty/sml:organizationName')))
        self.phone = testXMLValue(element.find(nsp('sml:ResponsibleParty/sml:contactInfo/sml:phone/sml:voice')))
        self.address = testXMLValue(
            element.find(nsp('sml:ResponsibleParty/sml:contactInfo/sml:address/sml:deliveryPoint')))
        self.city = testXMLValue(element.find(nsp('sml:ResponsibleParty/sml:contactInfo/sml:address/sml:city')))
        self.region = testXMLValue(
            element.find(nsp('sml:ResponsibleParty/sml:contactInfo/sml:address/sml:administrativeArea')))
        self.postcode = testXMLValue(
            element.find(nsp('sml:ResponsibleParty/sml:contactInfo/sml:address/sml:postalCode')))
        self.country = testXMLValue(element.find(nsp('sml:ResponsibleParty/sml:contactInfo/sml:address/sml:country')))
        self.email = testXMLValue(
            element.find(nsp('sml:ResponsibleParty/sml:contactInfo/sml:address/sml:electronicMailAddress')))
        self.url = testXMLAttribute(
            element.find(nsp('sml:ResponsibleParty/sml:contactInfo/sml:onlineResource')), nsp("xlink:href"))
class HistoryGroup(object):
    """Mixin parsing sml:history event lists, keyed by member name."""

    def __init__(self, element):
        self.history = {}
        for event_member in element.findall(nsp('sml:history/sml:EventList/sml:member')):
            # Keys are the member names exactly as they appear in the XML.
            name = testXMLAttribute(event_member, "name")
            if self.history.get(name) is None:
                self.history[name] = []
            for e in event_member.findall(nsp("sml:Event")):
                self.history[name].append(Event(e))

    def get_history_by_name(self, name):
        """Return the Events list for a member name, case insensitive.

        Bug fix: keys are stored verbatim, but the lookup used to
        pre-lowercase the requested name (``self.history.get(name.lower())``),
        so any member whose name contains an upper-case letter could never be
        found. Compare case-insensitively instead, matching the other
        ``*_by_name`` helpers in this module.
        """
        wanted = name.lower()
        for key, events in self.history.items():
            if key is not None and key.lower() == wanted:
                return events
        return []
class Event(ReferenceGroup, GeneralInfoGroup):
    """An sml:Event: id, date and description plus reference/general info."""

    def __init__(self, element):
        # Each mixin extracts its own subset of child elements.
        ReferenceGroup.__init__(self, element)
        GeneralInfoGroup.__init__(self, element)
        self.id = testXMLAttribute(element, nsp("gml:id"))
        self.date = testXMLValue(element.find(nsp('sml:date')))
        self.description = testXMLValue(element.find(nsp('gml:description')))
class MetadataGroup(GeneralInfoGroup, PropertyGroup, ConstraintGroup, ReferenceGroup, HistoryGroup):
    """Aggregate of all metadata mixins; each parses its own part of ``element``."""

    def __init__(self, element):
        # Explicit base calls: the mixins are not written for cooperative super().
        GeneralInfoGroup.__init__(self, element)
        PropertyGroup.__init__(self, element)
        ConstraintGroup.__init__(self, element)
        ReferenceGroup.__init__(self, element)
        HistoryGroup.__init__(self, element)
class AbstractFeature(object):
    """Base holding the gml name, description and bounding box of a feature."""

    def __init__(self, element):
        self.name = testXMLValue(element.find(nsp("gml:name")))
        self.description = testXMLValue(element.find(nsp("gml:description")))
        self.gmlBoundedBy = testXMLValue(element.find(nsp("gml:boundedBy")))
class AbstractProcess(AbstractFeature, MetadataGroup):
    """A full process: feature info, metadata, and its inputs/outputs/parameters."""

    def __init__(self, element):
        AbstractFeature.__init__(self, element)
        MetadataGroup.__init__(self, element)
        # sml:IoComponentPropertyType
        self.inputs = element.findall(nsp("sml:input"))
        # sml:IoComponentPropertyType
        self.outputs = element.findall(nsp("sml:output"))
        # swe:DataComponentPropertyType
        self.parameters = element.findall(nsp("sml:parameter"))
class AbstractRestrictedProcess(AbstractFeature):
    """ Removes ('restricts' in xml schema language) gml:name, gml:description,
    and sml:metadataGroup from an AbstractProcess """

    def __init__(self, element):
        AbstractFeature.__init__(self, element)
        # Explicitly blank out the restricted fields set by the base class.
        self.name = None
        self.description = None
class AbstractPureProcess(AbstractRestrictedProcess):
    """A restricted process that still carries inputs, outputs and parameters."""

    def __init__(self, element):
        AbstractRestrictedProcess.__init__(self, element)
        # sml:IoComponentPropertyType
        self.inputs = element.findall(nsp("sml:input"))
        # sml:IoComponentPropertyType
        self.outputs = element.findall(nsp("sml:output"))
        # swe:DataComponentPropertyType
        self.parameters = element.findall(nsp("sml:parameter"))
class ProcessModel(AbstractPureProcess):
    """An sml:ProcessModel: a pure process with an attached method."""

    def __init__(self, element):
        AbstractPureProcess.__init__(self, element)
        # NOTE(review): this find() uses a bare 'method' tag while the rest of
        # the module namespace-qualifies paths via nsp() — verify intended.
        self.method = ProcessMethod(element.find("method"))
class CompositePropertiesGroup(object):
    """Mixin parsing the components and connections of a composite process."""

    def __init__(self, element):
        # All components should be of instance AbstractProcess (sml:_Process)
        self.components = element.findall(nsp("sml:components/sml:ComponentList/sml:component"))
        # sml:Link or sml:ArrayLink element
        self.connections = element.findall(nsp("sml:connections/sml:ConnectionList/sml:connection"))
class PhysicalPropertiesGroup(object):
    """Mixin parsing the spatial/temporal frames, position and interface."""

    def __init__(self, element):
        # gml:EngieeringCRS element
        self.spatialReferenceFrame = element.find(nsp("sml:spatialReferenceFrame/gml:EngineeringCRS"))
        # gml:TemporalCRS element
        self.temporalReferenceFrame = element.find(nsp("sml:temporalReferenceFrame/gml:TemporalCRS"))
        # gml:Envelope element
        self.smlBoundedBy = element.find(nsp("sml:boundedBy"))
        # swe:Time or sml:_Process element
        self.timePosition = element.find(nsp("sml:timePosition"))
        # It is either a sml:position OR and sml:location element here. Process both.
        # swe:Position, swe:Vector, or sml:_Process element
        self.positions = element.findall(nsp("sml:position"))
        # gml:Point of gml:_Curve
        self.location = element.find(nsp("sml:location"))
        # A missing sml:interface makes Interface(None) raise AttributeError;
        # treat that as "no interface".
        try:
            self.interface = Interface(element.find(nsp("sml:interface")))
        except AttributeError:
            self.interface = None
class ProcessChain(AbstractPureProcess, CompositePropertiesGroup):
    """An sml:ProcessChain: a pure process composed of linked sub-processes."""

    def __init__(self, element):
        AbstractPureProcess.__init__(self, element)
        CompositePropertiesGroup.__init__(self, element)
class System(AbstractProcess, PhysicalPropertiesGroup, CompositePropertiesGroup):
    """An sml:System: a physical, composite process."""

    def __init__(self, element):
        AbstractProcess.__init__(self, element)
        PhysicalPropertiesGroup.__init__(self, element)
        CompositePropertiesGroup.__init__(self, element)
class Component(AbstractProcess, PhysicalPropertiesGroup):
    """An sml:Component: a physical process with an attached method."""

    def __init__(self, element):
        AbstractProcess.__init__(self, element)
        PhysicalPropertiesGroup.__init__(self, element)
        # NOTE(review): bare 'method' tag rather than nsp('sml:method'), unlike
        # the rest of the module — verify intended.
        self.method = ProcessMethod(element.find("method"))
class Term(object):
    """Base for sml:Term-wrapping elements: codeSpace, definition and value."""

    def __init__(self, element):
        self.codeSpace = testXMLAttribute(element.find(nsp('sml:Term/sml:codeSpace')), nsp("xlink:href"))
        self.definition = testXMLAttribute(element.find(nsp('sml:Term')), "definition")
        self.value = testXMLValue(element.find(nsp('sml:Term/sml:value')))
class Classifier(Term):
    """A named sml:classifier term."""

    def __init__(self, element):
        Term.__init__(self, element)
        self.name = testXMLAttribute(element, "name")
class Identifier(Term):
    """A named sml:identifier term."""

    def __init__(self, element):
        Term.__init__(self, element)
        self.name = testXMLAttribute(element, "name")
class ProcessMethod(MetadataGroup):
    """ Inherits from gml:AbstractGMLType """

    def __init__(self, element):
        MetadataGroup.__init__(self, element)
        self.rules = element.find(nsp("sml:rules"))
        self.ioStructure = element.find(nsp("sml:IOStructureDefinition"))
        self.algorithm = element.find(nsp("sml:algorithm"))
        self.implementations = element.findall(nsp("sml:implementation"))
class Interface(object):
    """A named sml:interface wrapping an InterfaceDefinition."""

    def __init__(self, element):
        self.name = testXMLAttribute(element, "name")
        self.interface_definition = InterfaceDefinition(element.find(nsp("sml:InterfaceDefinition")))
class InterfaceDefinition(object):
    """Placeholder: sml:InterfaceDefinition parsing is not implemented."""

    def __init__(self, element):
        raise NotImplementedError("InterfaceDefinition is not implemented in OWSLib (yet)")
class Link(object):
    """Placeholder: sml:Link parsing is not implemented."""

    def __init__(self, element):
        raise NotImplementedError("Link is not implemented in OWSLib (yet)")
class ArrayLink(object):
    """Placeholder: sml:ArrayLink parsing is not implemented."""

    def __init__(self, element):
        raise NotImplementedError("ArrayLink is not implemented in OWSLib (yet)")
| {
"content_hash": "7b32703a8391048e5c16a0980cdc4598",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 119,
"avg_line_length": 40.30277777777778,
"alnum_prop": 0.6603487490523123,
"repo_name": "geopython/OWSLib",
"id": "d4d8b0382f9729dce994e79baf64b6143dea44e7",
"size": "14528",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "owslib/swe/sensor/sml.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4639"
},
{
"name": "Makefile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "1158395"
}
],
"symlink_target": ""
} |
# Tells Django (pre-3.2 style) which AppConfig class this app uses by default.
default_app_config = 'profiles.apps.ProfilesConfig'
| {
"content_hash": "0190070450ea602479b3c038d70ec36f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 51,
"avg_line_length": 52,
"alnum_prop": 0.8076923076923077,
"repo_name": "un33k/djangoware",
"id": "361a18c7d5aa66541899126417045f38aa20a126",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/profiles/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6198"
},
{
"name": "Python",
"bytes": "109976"
},
{
"name": "Shell",
"bytes": "8545"
}
],
"symlink_target": ""
} |
from euca2ools.commands.argtypes import delimited_list
from euca2ools.commands.elasticloadbalancing import ELBRequest
from requestbuilder import Arg
from requestbuilder.mixins import TabifyingMixin
class ApplySecurityGroupsToLoadBalancer(ELBRequest, TabifyingMixin):
    """ELB request replacing the security groups associated with a VPC load balancer."""

    DESCRIPTION = ('[VPC only] Associate one or more security groups with a '
                   'load balancer. All previous associations with security '
                   'groups will be replaced.')
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('-g', '--security-groups', dest='SecurityGroups.member',
                metavar='GROUP1,GROUP2,...', type=delimited_list(','),
                required=True, help='''security groups to associate the load
                balancer with (required)''')]
    # Response elements to be parsed as lists.
    LIST_TAGS = ['SecurityGroups']

    def print_result(self, result):
        # Python 2 print statement: this codebase targets Python 2.
        print self.tabify(('SECURITY_GROUPS',
                           ', '.join(result.get('SecurityGroups', []))))
| {
"content_hash": "270f40a75e4f2f3ac1edc073e14d3edd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 50.142857142857146,
"alnum_prop": 0.647673314339981,
"repo_name": "vasiliykochergin/euca2ools",
"id": "24bde3f8fd15dd563719fced86dd149312ba43ee",
"size": "2395",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "euca2ools/commands/elasticloadbalancing/applysecuritygroupstoloadbalancer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1220919"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.db import models
class PlayerStat(models.Model):
    """Per-user points tally used by the test suite."""

    # Reverse accessor on User is ``user.stats``.
    user = models.OneToOneField(User, related_name="stats")
    points = models.IntegerField(default=0)
| {
"content_hash": "09c93f8b461076bd13ef369bed3e9162",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 59,
"avg_line_length": 30.142857142857142,
"alnum_prop": 0.7677725118483413,
"repo_name": "kinsights/brabeion",
"id": "1f38c200b94bed6f9fd6640901ac70db704b9387",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brabeion/tests/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14046"
}
],
"symlink_target": ""
} |
import os
import re
import json
import collections
from ..cgi import BASE_PATH, save, MyDict
from ..emailer import sendemail
import markdown
from jinja2 import Environment, Markup, PackageLoader
# Shared Markdown converter: newline-to-<br/> extension, XHTML5 output.
md = markdown.Markdown(extensions=['markdown.extensions.nl2br'],
                       output_format='xhtml5')
# Jinja environment loading templates from the ldform package; registers a
# 'markdown' filter that renders Markdown to markup-safe HTML.
env = Environment(loader=PackageLoader('ldform', 'templates'))
env.filters['markdown'] = lambda text: Markup(md.convert(text))
def respond(data):
    """Send a vote-confirmation e-mail (if requested) and emit a redirect.

    :param data: CGI form data; reads ``confemail``, ``lang`` and ``regid``.
    """
    email = data.getfirst('confemail')
    if email and '@' in email:
        lang = data.getfirst('lang', 'cs')
        tplname = 'voteconfmail.j2' if lang == 'cs' else 'voteconfmailen.j2'
        emailtext = env.get_template(tplname).render(data=data)
        subject = "LinuxDays: potvrzení hlasování" if lang == 'cs' else "LinuxDays: vote confirmation"
        recipients = [('', email.strip())]
        sendemail(emailtext, subject, recipients)
    # CGI response: headers, then a mandatory blank line, then the body.
    # Previous version leaked a stray '")' into the Content-type header
    # and had no header/body separator.
    print("""Status: 303 See Other
Location: /cgi-bin/ldform.py?formid=vote2017conf&b={}
Content-type: text/html; charset=UTF-8

<html>
<body>
<h1>Your response has been recorded. Thank you.</h1>
</body>
</html>
""".format(data['regid']))
| {
"content_hash": "35f6b1e49d167bc6c0f17ac63fff0672",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 102,
"avg_line_length": 32.75,
"alnum_prop": 0.6802374893977947,
"repo_name": "oskar456/ldform",
"id": "353b41f62a8951b290973d934cf52d3b820a45fc",
"size": "1182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ldform/handlers/vote2017.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "194"
},
{
"name": "Python",
"bytes": "17266"
}
],
"symlink_target": ""
} |
"""
Interface for all diagnostic tests plugins.
"""
from control.plugin import DeclareFramework
@DeclareFramework('diagnostics')
class Diagnostics(object):
    """Interface for diagnostics plugin classes.

    Concrete diagnostics plugins are expected to override these no-op
    methods.
    """
    def __init__(self, **options):
        """Accept arbitrary plugin-specific options; the base does nothing."""
        pass
    def launch_diags(self, device, bmc):
        """Launch the requested diagnostic test on *device* via *bmc*.

        Base implementation is a no-op; plugins must override.
        """
        pass
| {
"content_hash": "ea69750b2f026bdfaf904fbedfbed28a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 25.0625,
"alnum_prop": 0.6608478802992519,
"repo_name": "intel-ctrlsys/actsys",
"id": "5353a0c851cfa340e98285aad495b6350b4de053",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actsys/control/diagnostics/diagnostics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "11641"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1048209"
}
],
"symlink_target": ""
} |
import requests
import logging
import time
from ..model.Scanner import NetworkScanner
from ..configuration import Configuration
def sync_with_server(data):
    """POST the network-scan registry to the configured server and log the outcome.

    :param data: mapping of scan results sent as the POST form body.
    """
    r = requests.post(Configuration.SERVER_URI, data=data)
    if r.status_code == 200:
        logging.warning('Networking data send was successful!')
    else:
        # Lazy %-style args let logging skip formatting when the record
        # is filtered; avoids the eager str()/%-interpolation of before.
        logging.warning('Networking data error!!! Error code: %s', r.status_code)
def main():
    """Run the network scanner forever, pushing results after each pass."""
    scanner = NetworkScanner()
    while True:
        # Busy-loop while a scan is already in flight (original behavior).
        if scanner.running:
            continue
        logging.warning('-' * 10)
        logging.warning("Networking scan will run...")
        scanner.scan()
        logging.warning("Networking send information to server!")
        sync_with_server(scanner.registry)
        logging.warning("Networking goes to sleep for 5 minutes...")
        time.sleep(300)
# Entry point. Run using: python -m client.run.network_run
if __name__ == '__main__':
    main()
| {
"content_hash": "75389e99c8dab783af9d720568220fd2",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 89,
"avg_line_length": 29.419354838709676,
"alnum_prop": 0.631578947368421,
"repo_name": "rdenadai/raspberry-home-auto",
"id": "b1f32b0ab5fc4c7a2caa1ce354fe6cebeff404cf",
"size": "938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/run/network_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "906"
},
{
"name": "HTML",
"bytes": "4116"
},
{
"name": "JavaScript",
"bytes": "50904"
},
{
"name": "Python",
"bytes": "14109"
},
{
"name": "Shell",
"bytes": "283"
}
],
"symlink_target": ""
} |
"""Test the topology module's Server Selection Spec implementation."""
import os
import sys
sys.path[0:0] = [""]
from test import unittest
from test.utils_selection_tests import create_selection_tests
# Location of JSON test specifications: <this dir>/server_selection/server_selection.
_TEST_PATH = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'server_selection',
    'server_selection',
)
class TestAllScenarios(create_selection_tests(_TEST_PATH)):
    """Test case whose methods are generated from the JSON spec files."""
    pass
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "6f72f12cb24e13b10182214470f1fb87",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 22.681818181818183,
"alnum_prop": 0.7054108216432866,
"repo_name": "ramnes/mongo-python-driver",
"id": "b54eea4f3c56b4b88231ced775c446b11c2bca59",
"size": "1073",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_server_selection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "202602"
},
{
"name": "Python",
"bytes": "1846556"
},
{
"name": "Shell",
"bytes": "7279"
}
],
"symlink_target": ""
} |
import os
import json
import logging
import pkg_resources
import cssutils
from flask import Flask, g
from flask_wtf import CSRFProtect
from flask_login import current_user
from flask_principal import Principal, UserNeed, identity_loaded
from flaskapp.lib import template_helpers
from flaskapp.meta import mail, db, lm
from flaskapp.models import User
from flaskapp.views import content, auth
# suppress cssutils warning messages (its parser logs noisily below CRITICAL)
cssutils.log.setLevel(logging.CRITICAL)
# ================================
# App creator method
# ================================
def create_app(extra_config=None):
    """Create and configure the Flaskapp Flask application.

    :param extra_config: optional dict merged over the base ``config``
        module settings (useful for tests).
    :returns: a fully wired ``flask.Flask`` instance.
    """
    app = Flask('flaskapp',
                template_folder='templates',
                static_folder='static')
    app.config.from_object('config')
    app.config.update(**(extra_config or {}))
    app.before_request(before_request)
    # import static file manifest (maps asset names to revisioned filenames)
    js = pkg_resources.resource_string('flaskapp', '/static/rev-manifest.json')
    app.config['static_manifest'] = json.loads(js.decode('utf-8'))
    # configure jinja2: expose template helpers under the "h" global
    app.jinja_env.globals.update({'h': template_helpers})
    # add Flask-WTForms CSRF Protection
    CSRFProtect(app)
    # init Flask-SQLAlchemy
    db.init_app(app)
    # init Flask-Principal; identity needs are populated in on_identity_loaded
    Principal(app)
    identity_loaded.connect(on_identity_loaded, app)
    # init Flask-Login
    lm.init_app(app)
    lm.login_view = 'auth.login'
    lm.user_loader(load_user)
    # init Flask-Mail
    mail.init_app(app)
    # register blueprints
    app.register_blueprint(content.bp)
    app.register_blueprint(auth.bp, url_prefix='/auth')
    return app
# ===============================
# Helper methods
# ===============================
def before_request():
    """Expose the Flask-Login current user as ``g.user`` for each request.
    """
    g.user = current_user
def load_user(id):
    """Flask-Login user loader: resolve a stored session id to a User row.
    """
    user_pk = int(id)
    return User.query.get(user_pk)
def on_identity_loaded(sender, identity):
    """Flask-Principal identity listener: attach the user and their needs.
    """
    # Always record the current user object on the identity.
    identity.user = current_user
    # Anonymous users carry no needs.
    if not current_user.is_authenticated:
        return
    identity.provides.add(UserNeed(current_user.id))
| {
"content_hash": "79df8827cc22c35c0bdf7900192cdc31",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 79,
"avg_line_length": 24.47826086956522,
"alnum_prop": 0.650088809946714,
"repo_name": "ChIaSg/flaskapp",
"id": "dff7adc177b5170deece4a2991fd31071dbe2da6",
"size": "2252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskapp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3876"
},
{
"name": "HTML",
"bytes": "16089"
},
{
"name": "JavaScript",
"bytes": "1699"
},
{
"name": "Python",
"bytes": "29172"
}
],
"symlink_target": ""
} |
# NOTE(review): this file lives under mitmproxy's test data for flow export
# (test_flow_export/locust_task_post.py) and is likely compared verbatim
# against generated output — confirm before altering its text.
@task()
def path(self):
    # POST fixed content to <host>/path and stash the response.
    url = self.locust.host + '/path'
    data = '''content'''
    self.response = self.client.request(
        method='POST',
        url=url,
        data=data,
    )
| {
"content_hash": "6cb1632775d331e3bbaa43958eaa73ac",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 44,
"avg_line_length": 20.90909090909091,
"alnum_prop": 0.43478260869565216,
"repo_name": "xaxa89/mitmproxy",
"id": "989df455fcfb071f2d426e4f2ef39aba553c5007",
"size": "230",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "test/mitmproxy/data/test_flow_export/locust_task_post.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17714"
},
{
"name": "HTML",
"bytes": "4270"
},
{
"name": "JavaScript",
"bytes": "150625"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1535155"
},
{
"name": "Shell",
"bytes": "3660"
}
],
"symlink_target": ""
} |
# Bluetooth GATT "Immediate Alert" service; 0x1802 is its assigned 16-bit UUID.
NAME="Immediate Alert"
UUID=0x1802
| {
"content_hash": "27d4f0cce80a37edb4ddab4de119d260",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 22,
"avg_line_length": 17.5,
"alnum_prop": 0.8,
"repo_name": "brettchien/PyBLEWrapper",
"id": "d506989a5b4bdab3c0efe727e5f0f03c1cb4421c",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyble/const/service/immediate_alert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110350"
}
],
"symlink_target": ""
} |
"""Provides a seam for taskqueue-related operations."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import json
from google.appengine.api import taskqueue
from google.appengine.ext import deferred
# NOTE: The following constants must be kept in sync with the queue names
# declared in queue.yaml.
# Taskqueue for backing up state.
QUEUE_NAME_BACKUPS = 'backups'
# Taskqueue for running continuous computation jobs.
QUEUE_NAME_CONTINUOUS_JOBS = 'continuous-jobs'
# Default queue for processing tasks (including MapReduce ones).
QUEUE_NAME_DEFAULT = 'default'
# Taskqueue for sending email.
QUEUE_NAME_EMAILS = 'emails'
# Deferred queue for processing events outside the request/response cycle.
QUEUE_NAME_EVENTS = 'events'
# Taskqueue for running one-off jobs.
QUEUE_NAME_ONE_OFF_JOBS = 'one-off-jobs'
# Taskqueue for updating stats models.
QUEUE_NAME_STATS = 'stats'
def defer(fn, queue_name, *args, **kwargs):
    """Adds a new task to a specified deferred queue.
    Args:
        fn: *. The task being deferred. Will be called as: fn(*args, **kwargs).
        queue_name: str. The name of the queue to place the task into. Should be
            one of the QUEUE_NAME_* constants listed above.
        *args: list(*). Positional arguments for fn.
        **kwargs: dict(str : *). Keyword arguments for fn.
    """
    # See https://developers.google.com/appengine/articles/deferred for details
    # on the _queue kwarg. Per that article the deferred library serializes
    # fn onto the queue, so fn must be importable at execution time.
    deferred.defer(fn, *args, _queue=queue_name, **kwargs)
def enqueue_email_task(url, params, countdown):
    """Queue a task on the emails queue to send an e-mail.

    Args:
        url: str. Url of the handler that will process the task.
        params: dict(str : *). Parameters delivered to the handler as a
            JSON payload.
        countdown: int. Seconds to wait before the task may execute.
    """
    # Parameter reference:
    # https://cloud.google.com/appengine/docs/python/taskqueue
    payload = json.dumps(params)
    taskqueue.add(
        queue_name=QUEUE_NAME_EMAILS,
        url=url,
        payload=payload,
        countdown=countdown,
        target=taskqueue.DEFAULT_APP_VERSION)
# A special exception that ensures that the task is not tried again, if it
# fails. Re-exported here so callers depend on this module rather than on
# the deferred library directly.
PermanentTaskFailure = deferred.PermanentTaskFailure
| {
"content_hash": "b0e1aa86d6f209cb6a7c872b5f7e0ead",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 80,
"avg_line_length": 38.25806451612903,
"alnum_prop": 0.7099494097807757,
"repo_name": "prasanna08/oppia",
"id": "44eaff1b55542791c856dc1d42e87374d08bb5a2",
"size": "2995",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/platform/taskqueue/gae_taskqueue_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "97795"
},
{
"name": "HTML",
"bytes": "1128491"
},
{
"name": "JavaScript",
"bytes": "733121"
},
{
"name": "Python",
"bytes": "9362251"
},
{
"name": "Shell",
"bytes": "10639"
},
{
"name": "TypeScript",
"bytes": "6077851"
}
],
"symlink_target": ""
} |
import pytest
from py2neo import Graph, Node
from py2neo.ext.ogm import Store
class Person(object):
    """Domain object used by the OGM store tests.

    Identity is the ``email`` attribute alone (mirroring the
    ``__primarykey__`` declaration): equality, inequality and hashing all
    delegate to it, so reloaded copies compare equal even when name/age
    differ.
    """
    __primarykey__ = "email"
    def __init__(self, email=None, name=None, age=None):
        self.email = email
        self.name = name
        self.age = age
    def __eq__(self, other):
        # Primary-key comparison only; name/age are deliberately ignored.
        return self.email == other.email
    def __ne__(self, other):
        return self.email != other.email
    def __hash__(self):
        # Defining __eq__ sets __hash__ to None under Python 3, which would
        # make Person unusable in sets/dict keys; restore a hash consistent
        # with __eq__ (equal emails -> equal hashes).
        return hash(self.email)
    def __repr__(self):
        return "{0} <{1}>".format(self.name, self.email)
class TestExampleCode(object):
    """Smoke-test of the documentation example against a live graph."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
    def test_can_execute_example_code(self):
        # Local Person deliberately shadows the module-level one: the
        # example is meant to be self-contained.
        class Person(object):
            def __init__(self, email=None, name=None, age=None):
                self.email = email
                self.name = name
                self.age = age
            def __str__(self):
                return self.name
        graph = Graph()
        store = Store(graph)
        alice = Person("alice@example.com", "Alice", 34)
        store.save_unique("People", "email", alice.email, alice)
        bob = Person("bob@example.org", "Bob", 66)
        carol = Person("carol@example.net", "Carol", 42)
        store.relate(alice, "LIKES", bob)
        store.relate(alice, "LIKES", carol)
        store.save(alice)
        friends = store.load_related(alice, "LIKES", Person)
        print("Alice likes {0}".format(" and ".join(str(f) for f in friends)))
class TestRelate(object):
    """Store.relate should stage pending relationships on ``__rel__``."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        self.store = Store(self.graph)
    def test_can_relate_to_other_object(self):
        alice = Person("alice@example.com", "Alice", 34)
        bob = Person("bob@example.org", "Bob", 66)
        self.store.relate(alice, "LIKES", bob)
        assert hasattr(alice, "__rel__")
        assert isinstance(alice.__rel__, dict)
        assert "LIKES" in alice.__rel__
        assert alice.__rel__["LIKES"] == [({}, bob)]
    def test_can_relate_to_other_object_with_properties(self):
        alice = Person("alice@example.com", "Alice", 34)
        bob = Person("bob@example.org", "Bob", 66)
        self.store.relate(alice, "LIKES", bob, {"since": 1999})
        assert hasattr(alice, "__rel__")
        assert isinstance(alice.__rel__, dict)
        assert "LIKES" in alice.__rel__
        assert alice.__rel__["LIKES"] == [({"since": 1999}, bob)]
class TestSeparate(object):
    """Store.separate should drop staged relationships; unknown rel types
    or endpoints are silently ignored."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        self.store = Store(self.graph)
    def test_can_separate_from_other_objects(self):
        alice = Person("alice@example.com", "Alice", 34)
        bob = Person("bob@example.org", "Bob", 66)
        carol = Person("carol@example.net", "Carol", 42)
        self.store.relate(alice, "LIKES", bob)
        self.store.relate(alice, "LIKES", carol)
        self.store.separate(alice, "LIKES", carol)
        assert alice.__rel__["LIKES"] == [({}, bob)]
        self.store.separate(alice, "LIKES", bob)
        assert alice.__rel__["LIKES"] == []
    def test_can_separate_without_previous_relate(self):
        alice = Person("alice@example.com", "Alice", 34)
        bob = Person("bob@example.org", "Bob", 66)
        assert not hasattr(alice, "__rel__")
        self.store.separate(alice, "LIKES", bob)
        assert not hasattr(alice, "__rel__")
    def test_nothing_happens_if_unknown_rel_type_supplied(self):
        alice = Person("alice@example.com", "Alice", 34)
        bob = Person("bob@example.org", "Bob", 66)
        self.store.relate(alice, "LIKES", bob)
        self.store.separate(alice, "DISLIKES", bob)
        assert alice.__rel__["LIKES"] == [({}, bob)]
    def test_nothing_happens_if_unknown_endpoint_supplied(self):
        alice = Person("alice@example.com", "Alice", 34)
        bob = Person("bob@example.org", "Bob", 66)
        carol = Person("carol@example.net", "Carol", 42)
        self.store.relate(alice, "LIKES", bob)
        self.store.separate(alice, "LIKES", carol)
        assert alice.__rel__["LIKES"] == [({}, bob)]
class TestLoadRelated(object):
    """Store.load_related should return objects reachable via a rel type."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        self.store = Store(self.graph)
    def test_can_load_single_related_object(self):
        alice = Person("alice@example.com", "Alice", 34)
        bob = Person("bob@example.org", "Bob", 66)
        self.store.relate(alice, "LIKES", bob)
        self.store.save(alice)
        friends = self.store.load_related(alice, "LIKES", Person)
        assert friends == [bob]
    def test_can_load_multiple_related_objects(self):
        alice = Person("alice@example.com", "Alice", 34)
        bob = Person("bob@example.org", "Bob", 66)
        carol = Person("carol@example.net", "Carol", 42)
        self.store.relate(alice, "LIKES", bob)
        self.store.relate(alice, "LIKES", carol)
        self.store.save(alice)
        friends = self.store.load_related(alice, "LIKES", Person)
        assert friends == [bob, carol]
    def test_can_load_related_objects_among_other_relationships(self):
        alice = Person("alice@example.com", "Alice", 34)
        bob = Person("bob@example.org", "Bob", 66)
        carol = Person("carol@example.net", "Carol", 42)
        dave = Person("dave@example.co.uk", "Dave", 18)
        self.store.relate(alice, "LIKES", bob)
        self.store.relate(alice, "LIKES", carol)
        self.store.relate(alice, "DISLIKES", dave)
        self.store.save(alice)
        friends = self.store.load_related(alice, "LIKES", Person)
        assert friends == [bob, carol]
        enemies = self.store.load_related(alice, "DISLIKES", Person)
        assert enemies == [dave]
    def test_can_load_related_when_never_related(self):
        alice = Person("alice@example.com", "Alice", 34)
        friends = self.store.load_related(alice, "LIKES", Person)
        assert friends == []
class TestLoad(object):
    """Store.load should hydrate an object from an existing node."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        self.store = Store(self.graph)
    def test_can_load(self):
        alice_node, = self.graph.create({
            "email": "alice@example.com",
            "name": "Alice",
            "age": 34,
        })
        alice = self.store.load(Person, alice_node)
        assert alice.email == "alice@example.com"
        assert alice.name == "Alice"
        assert alice.age == 34
class TestLoadIndexed(object):
    """Store.load_indexed should return every object under an index entry."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        # Start from a clean "People" legacy index for each test.
        try:
            self.graph.legacy.delete_index(Node, "People")
        except LookupError:
            pass
        self.store = Store(self.graph)
    def test_can_load(self):
        people = self.graph.legacy.get_or_create_index(Node, "People")
        alice_node, bob_node = self.graph.create({
            "email": "alice@example.com",
            "name": "Alice Smith",
            "age": 34,
        }, {
            "email": "bob@example.org",
            "name": "Bob Smith",
            "age": 66,
        })
        people.add("family_name", "Smith", alice_node)
        people.add("family_name", "Smith", bob_node)
        smiths = self.store.load_indexed("People", "family_name", "Smith", Person)
        assert len(smiths) == 2
        for i, smith in enumerate(smiths):
            assert smiths[i].email in ("alice@example.com", "bob@example.org")
            assert smiths[i].name in ("Alice Smith", "Bob Smith")
            assert smiths[i].age in (34, 66)
class TestLoadUnique(object):
    """Store.load_unique should hydrate one object (with its relationships)
    from a unique index entry, or return None when absent."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        # Start from a clean "People" legacy index for each test.
        try:
            self.graph.legacy.delete_index(Node, "People")
        except LookupError:
            pass
        self.graph.legacy.get_or_create_index(Node, "People")
        self.store = Store(self.graph)
    def test_can_load_simple_object(self):
        alice_node = self.graph.legacy.get_or_create_indexed_node(
            "People", "email", "alice@example.com", {
                "email": "alice@example.com",
                "name": "Alice Allison",
                "age": 34,
            }
        )
        alice = self.store.load_unique("People", "email", "alice@example.com", Person)
        assert isinstance(alice, Person)
        assert hasattr(alice, "__node__")
        assert alice.__node__ == alice_node
        assert hasattr(alice, "__rel__")
        assert alice.__rel__ == {}
        assert alice.email == "alice@example.com"
        assert alice.name == "Alice Allison"
        assert alice.age == 34
    def test_can_load_object_with_relationships(self):
        alice_node = self.graph.legacy.get_or_create_indexed_node(
            "People", "email", "alice@example.com", {
                "email": "alice@example.com",
                "name": "Alice Allison",
                "age": 34,
            }
        )
        path = alice_node.create_path("LIKES", {"name": "Bob Robertson"})
        bob_node = path.nodes[1]
        alice = self.store.load_unique("People", "email", "alice@example.com", Person)
        assert isinstance(alice, Person)
        assert hasattr(alice, "__node__")
        assert alice.__node__ == alice_node
        assert hasattr(alice, "__rel__")
        assert alice.__rel__ == {
            "LIKES": [({}, bob_node)],
        }
        assert alice.email == "alice@example.com"
        assert alice.name == "Alice Allison"
        assert alice.age == 34
        friends = self.store.load_related(alice, "LIKES", Person)
        assert isinstance(friends, list)
        assert len(friends) == 1
        friend = friends[0]
        assert isinstance(friend, Person)
        assert friend.__node__ == bob_node
        enemies = self.store.load_related(alice, "DISLIKES", Person)
        assert isinstance(enemies, list)
        assert len(enemies) == 0
    def test_will_not_load_when_none_exists(self):
        alice = self.store.load_unique("People", "email", "alice@example.com", Person)
        assert alice is None
class TestReload(object):
    """Store.reload should refresh an object from its underlying node."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        self.store = Store(self.graph)
    def test_can_reload(self):
        alice = Person("alice@example.com", "Alice", 34)
        self.store.save_unique("People", "email", "alice@example.com", alice)
        assert alice.__node__["name"] == "Alice"
        assert alice.__node__["age"] == 34
        # Mutate the node directly, push to the server, then reload the
        # Python object and expect the new values.
        alice.__node__["name"] = "Alice Smith"
        alice.__node__["age"] = 35
        alice.__node__.push()
        self.store.reload(alice)
        assert alice.name == "Alice Smith"
        assert alice.age == 35
class TestSave(object):
    """Store.save should push attribute changes to an already-saved node."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        self.store = Store(self.graph)
    def test_can_save_simple_object(self):
        alice = Person("alice@example.com", "Alice", 34)
        assert not self.store.is_saved(alice)
        self.store.save_unique("People", "email", "alice@example.com", alice)
        assert self.store.is_saved(alice)
        assert alice.__node__["name"] == "Alice"
        assert alice.__node__["age"] == 34
        alice.name = "Alice Smith"
        alice.age = 35
        self.store.save(alice)
        assert alice.__node__["name"] == "Alice Smith"
        assert alice.__node__["age"] == 35
class TestSaveIndexed(object):
    """Store.save_indexed should add each saved node under the index entry."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        # Start from a clean "People" legacy index for each test.
        try:
            self.graph.legacy.delete_index(Node, "People")
        except LookupError:
            pass
        self.store = Store(self.graph)
    def test_can_save(self):
        alice = Person("alice@example.com", "Alice Smith", 34)
        bob = Person("bob@example.org", "Bob Smith", 66)
        self.store.save_indexed("People", "family_name", "Smith", alice, bob)
        people = self.graph.legacy.get_index(Node, "People")
        smiths = people.get("family_name", "Smith")
        assert len(smiths) == 2
        assert alice.__node__ in smiths
        assert bob.__node__ in smiths
        carol = Person("carol@example.net", "Carol Smith", 42)
        self.store.save_indexed("People", "family_name", "Smith", carol)
        smiths = people.get("family_name", "Smith")
        assert len(smiths) == 3
        assert alice.__node__ in smiths
        assert bob.__node__ in smiths
        assert carol.__node__ in smiths
class TestSaveUnique(object):
    """Store.save_unique should create/locate the node via the unique index
    and persist any staged ``__rel__`` relationships."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        self.store = Store(self.graph)
    def test_can_save_simple_object(self):
        alice = Person("alice@example.com", "Alice", 34)
        self.store.save_unique("People", "email", "alice@example.com", alice)
        assert hasattr(alice, "__node__")
        assert isinstance(alice.__node__, Node)
        assert alice.__node__ == self.graph.legacy.get_indexed_node(
            "People", "email", "alice@example.com")
    def test_can_save_object_with_rels(self):
        alice = Person("alice@example.com", "Alice Allison", 34)
        bob_node, carol_node = self.graph.create(
            {"name": "Bob"},
            {"name": "Carol"},
        )
        alice.__rel__ = {"KNOWS": [({}, bob_node)]}
        self.store.save_unique("People", "email", "alice@example.com", alice)
        assert hasattr(alice, "__node__")
        assert isinstance(alice.__node__, Node)
        assert alice.__node__ == self.graph.legacy.get_indexed_node(
            "People", "email", "alice@example.com")
        friend_rels = list(alice.__node__.match_outgoing("KNOWS"))
        assert len(friend_rels) == 1
        assert bob_node in (rel.end_node for rel in friend_rels)
        # Saving again with an extended __rel__ should add the new rel.
        alice.__rel__ = {"KNOWS": [({}, bob_node), ({}, carol_node)]}
        self.store.save_unique("People", "email", "alice@example.com", alice)
        friend_rels = list(alice.__node__.match_outgoing("KNOWS"))
        assert len(friend_rels) == 2
        assert bob_node in (rel.end_node for rel in friend_rels)
        assert carol_node in (rel.end_node for rel in friend_rels)
class TestDelete(object):
    """Store.delete should remove the object's underlying node."""
    @pytest.fixture(autouse=True)
    def setup(self, graph):
        self.graph = graph
        self.store = Store(self.graph)
    def test_can_delete_object(self):
        alice = Person("alice@example.com", "Alice", 34)
        self.store.save_unique("People", "email", "alice@example.com", alice)
        node = alice.__node__
        assert node.exists
        self.store.delete(alice)
        assert not node.exists
| {
"content_hash": "a0e1fa772a1f2879aedba9c915c91938",
"timestamp": "",
"source": "github",
"line_count": 411,
"max_line_length": 86,
"avg_line_length": 35.5279805352798,
"alnum_prop": 0.576016983974798,
"repo_name": "nicolewhite/py2neo",
"id": "ed23a7b33720bf3011f273b5e9820cda98606066",
"size": "15234",
"binary": false,
"copies": "2",
"ref": "refs/heads/release/2.0.8",
"path": "test/ext/ogm/ogm_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3840"
},
{
"name": "Makefile",
"bytes": "6765"
},
{
"name": "Python",
"bytes": "879521"
},
{
"name": "Shell",
"bytes": "8124"
}
],
"symlink_target": ""
} |
"""
Manages information about the guest.
This class encapsulates libvirt domain provides certain
higher level APIs around the raw libvirt API. These APIs are
then used by all the other libvirt related classes
"""
import time
from lxml import etree
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
import six
from nova.compute import power_state
from nova import exception
from nova.i18n import _
from nova.virt import hardware
from nova.virt.libvirt import config as vconfig
# Imported lazily in Guest.__init__ so this module can be loaded (and unit
# tested) without the libvirt python bindings installed.
libvirt = None
LOG = logging.getLogger(__name__)
# Local mirror of libvirt's virDomainState values, so they can be referenced
# before the lazy libvirt import above has happened.
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
# Mapping from libvirt domain states to nova power states.
LIBVIRT_POWER_STATE = {
    VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
    VIR_DOMAIN_RUNNING: power_state.RUNNING,
    # The DOMAIN_BLOCKED state is only valid in Xen. It means that
    # the VM is running and the vCPU is idle. So, we map it to RUNNING
    VIR_DOMAIN_BLOCKED: power_state.RUNNING,
    VIR_DOMAIN_PAUSED: power_state.PAUSED,
    # The libvirt API doc says that DOMAIN_SHUTDOWN means the domain
    # is being shut down. So technically the domain is still
    # running. SHUTOFF is the real powered off state. But we will map
    # both to SHUTDOWN anyway.
    # http://libvirt.org/html/libvirt-libvirt.html
    VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
    VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
    VIR_DOMAIN_CRASHED: power_state.CRASHED,
    VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
class Guest(object):
    def __init__(self, domain):
        """Wrap a libvirt domain handle.

        :param domain: a libvirt domain object to delegate to
        """
        global libvirt
        # Lazy import: the first Guest created pulls in the libvirt
        # bindings, keeping module import free of that dependency.
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')
        self._domain = domain
def __repr__(self):
return "<Guest %(id)d %(name)s %(uuid)s>" % {
'id': self.id,
'name': self.name,
'uuid': self.uuid
}
    @property
    def id(self):
        """Integer id reported by libvirt for this domain."""
        return self._domain.ID()
    @property
    def uuid(self):
        """Domain UUID as a string."""
        return self._domain.UUIDString()
    @property
    def name(self):
        """Domain name as known to libvirt."""
        return self._domain.name()
    @property
    def _encoded_xml(self):
        """Domain XML description, safely decoded to text."""
        return encodeutils.safe_decode(self._domain.XMLDesc(0))
    @classmethod
    def create(cls, xml, host):
        """Create a new Guest
        :param xml: XML definition of the domain to create
        :param host: host.Host connection to define the guest on
        :returns guest.Guest: Guest ready to be launched
        :raises: re-raises whatever host.write_instance_config raises,
            after logging the offending XML
        """
        try:
            # libvirt expects text, not bytes, under Python 3.
            if six.PY3 and isinstance(xml, six.binary_type):
                xml = xml.decode('utf-8')
            guest = host.write_instance_config(xml)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error('Error defining a guest with XML: %s',
                          encodeutils.safe_decode(xml))
        return guest
def launch(self, pause=False):
"""Starts a created guest.
:param pause: Indicates whether to start and pause the guest
"""
flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
try:
return self._domain.createWithFlags(flags)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Error launching a defined domain '
'with XML: %s',
self._encoded_xml, errors='ignore')
    def poweroff(self):
        """Stops a running guest via destroy() — a forced power-off,
        not a graceful in-guest shutdown."""
        self._domain.destroy()
    def sync_guest_time(self):
        """Try to set VM time to the current value. This is typically useful
        when clock wasn't running on the VM for some time (e.g. during
        suspension or migration), especially if the time delay exceeds NTP
        tolerance.
        It is not guaranteed that the time is actually set (it depends on guest
        environment, especially QEMU agent presence) or that the set time is
        very precise (NTP in the guest should take care of it if needed).
        """
        # Split wall-clock time into the whole-second / nanosecond pair
        # that libvirt's setTime API expects.
        t = time.time()
        seconds = int(t)
        nseconds = int((t - seconds) * 10 ** 9)
        try:
            self._domain.setTime(time={'seconds': seconds,
                                       'nseconds': nseconds})
        except libvirt.libvirtError as e:
            # Expected agent-related failures are logged at debug only;
            # anything else is still just a warning — never an error.
            code = e.get_error_code()
            if code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
                LOG.debug('Failed to set time: QEMU agent unresponsive',
                          instance_uuid=self.uuid)
            elif code == libvirt.VIR_ERR_OPERATION_UNSUPPORTED:
                LOG.debug('Failed to set time: not supported',
                          instance_uuid=self.uuid)
            elif code == libvirt.VIR_ERR_ARGUMENT_UNSUPPORTED:
                LOG.debug('Failed to set time: agent not configured',
                          instance_uuid=self.uuid)
            else:
                LOG.warning('Failed to set time: %(reason)s',
                            {'reason': e}, instance_uuid=self.uuid)
        except Exception as ex:
            # The highest priority is not to let this method crash and thus
            # disrupt its caller in any way. So we swallow this error here,
            # to be absolutely safe.
            LOG.debug('Failed to set time: %(reason)s',
                      {'reason': ex}, instance_uuid=self.uuid)
        else:
            LOG.debug('Time updated to: %d.%09d', seconds, nseconds,
                      instance_uuid=self.uuid)
    def inject_nmi(self):
        """Injects an NMI (non-maskable interrupt) to a guest."""
        self._domain.injectNMI()
    def resume(self):
        """Resumes a paused guest."""
        self._domain.resume()
def get_interfaces(self):
"""Returns a list of all network interfaces for this domain."""
doc = None
try:
doc = etree.fromstring(self._encoded_xml)
except Exception:
return []
interfaces = []
nodes = doc.findall('./devices/interface/target')
for target in nodes:
interfaces.append(target.get('dev'))
return interfaces
    def get_interface_by_cfg(self, cfg):
        """Lookup a full LibvirtConfigGuestInterface with
        LibvirtConfigGuestInterface generated
        by nova.virt.libvirt.vif.get_config.
        :param cfg: config object that represents the guest interface.
        :type cfg: LibvirtConfigGuestInterface object
        :returns: nova.virt.libvirt.config.LibvirtConfigGuestInterface instance
            if found, else None (implicitly, when cfg is falsy or nothing
            matches)
        """
        if cfg:
            interfaces = self.get_all_devices(
                vconfig.LibvirtConfigGuestInterface)
            for interface in interfaces:
                # NOTE(leehom) LibvirtConfigGuestInterface get from domain and
                # LibvirtConfigGuestInterface generated by
                # nova.virt.libvirt.vif.get_config must be identical.
                # NOTE(arches) Skip checking target_dev for vhostuser
                # vif type; target_dev is not a valid value for vhostuser.
                if (interface.mac_addr == cfg.mac_addr and
                        interface.net_type == cfg.net_type and
                        interface.source_dev == cfg.source_dev and
                        (cfg.net_type == 'vhostuser' or
                         interface.target_dev == cfg.target_dev) and
                        interface.vhostuser_path == cfg.vhostuser_path):
                    return interface
    def get_vcpus_info(self):
        """Returns virtual cpus information of guest.
        :returns: generator of guest.VCPUInfo
        """
        # vcpus()[0] is the per-vCPU info list; by the keyword mapping below
        # each entry is (id, state, time, cpu).
        vcpus = self._domain.vcpus()
        for vcpu in vcpus[0]:
            yield VCPUInfo(
                id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])
    def delete_configuration(self, support_uefi=False):
        """Undefines a domain from hypervisor.

        :param support_uefi: when True also remove the NVRAM file backing
            a UEFI guest
        """
        try:
            flags = libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE
            if support_uefi:
                flags |= libvirt.VIR_DOMAIN_UNDEFINE_NVRAM
            self._domain.undefineFlags(flags)
        except libvirt.libvirtError:
            # Fall back to a plain undefine if the flagged call fails.
            LOG.debug("Error from libvirt during undefineFlags for guest "
                      "%d. Retrying with undefine", self.id)
            self._domain.undefine()
        except AttributeError:
            # Older versions of libvirt don't support undefine flags,
            # trying to remove managed image
            try:
                if self._domain.hasManagedSaveImage(0):
                    self._domain.managedSaveRemove(0)
            except AttributeError:
                pass
            self._domain.undefine()
    def has_persistent_configuration(self):
        """Whether domain config is persistently stored on the host.

        :returns: bool from libvirt's isPersistent()
        """
        return self._domain.isPersistent()
    def attach_device(self, conf, persistent=False, live=False):
        """Attaches device to the guest.
        :param conf: A LibvirtConfigObject of the device to attach
        :param persistent: A bool to indicate whether the change is
                           persistent or not
        :param live: A bool to indicate whether it affect the guest
                     in running state
        """
        # Compose AFFECT_CONFIG / AFFECT_LIVE flags; both may be set.
        flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
        flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
        device_xml = conf.to_xml()
        # libvirt expects text, not bytes, under Python 3.
        if six.PY3 and isinstance(device_xml, six.binary_type):
            device_xml = device_xml.decode('utf-8')
        LOG.debug("attach device xml: %s", device_xml)
        self._domain.attachDeviceFlags(device_xml, flags=flags)
def get_config(self):
"""Returns the config instance for a guest
:returns: LibvirtConfigGuest instance
"""
config = vconfig.LibvirtConfigGuest()
config.parse_str(self._domain.XMLDesc(0))
return config
def get_disk(self, device):
"""Returns the disk mounted at device
:returns LivirtConfigGuestDisk: mounted at device or None
"""
try:
doc = etree.fromstring(self._domain.XMLDesc(0))
except Exception:
return None
# FIXME(lyarwood): Workaround for the device being either a target dev
# when called via swap_volume or source file when called via
# live_snapshot. This should be removed once both are refactored to use
# only the target dev of the device.
node = doc.find("./devices/disk/target[@dev='%s'].." % device)
if node is None:
node = doc.find("./devices/disk/source[@file='%s'].." % device)
if node is not None:
conf = vconfig.LibvirtConfigGuestDisk()
conf.parse_dom(node)
return conf
def get_all_disks(self):
"""Returns all the disks for a guest
:returns: a list of LibvirtConfigGuestDisk instances
"""
return self.get_all_devices(vconfig.LibvirtConfigGuestDisk)
def get_all_devices(self, devtype=None):
"""Returns all devices for a guest
:param devtype: a LibvirtConfigGuestDevice subclass class
:returns: a list of LibvirtConfigGuestDevice instances
"""
try:
config = vconfig.LibvirtConfigGuest()
config.parse_str(
self._domain.XMLDesc(0))
except Exception:
return []
devs = []
for dev in config.devices:
if (devtype is None or
isinstance(dev, devtype)):
devs.append(dev)
return devs
    def detach_device_with_retry(self, get_device_conf_func, device, live,
                                 max_retry_count=7, inc_sleep_time=2,
                                 max_sleep_time=30,
                                 alternative_device_name=None):
        """Detaches a device from the guest. After the initial detach request,
        a function is returned which can be used to ensure the device is
        successfully removed from the guest domain (retrying the removal as
        necessary).

        :param get_device_conf_func: function which takes device as a parameter
                                     and returns the configuration for device
        :param device: device to detach
        :param live: bool to indicate whether it affects the guest in running
                     state
        :param max_retry_count: number of times the returned function will
                                retry a detach before failing
        :param inc_sleep_time: incremental time to sleep in seconds between
                               detach retries
        :param max_sleep_time: max sleep time in seconds beyond which the sleep
                               time will not be incremented using param
                               inc_sleep_time. On reaching this threshold,
                               max_sleep_time will be used as the sleep time.
        :param alternative_device_name: This is an alternative identifier for
            the device if device is not an ID, used solely for error messages.
        :returns: a zero-argument callable (decorated with a retry loop) that
            keeps re-issuing the live detach until the device is gone or
            DeviceDetachFailed is raised.
        """
        alternative_device_name = alternative_device_name or device
        def _try_detach_device(conf, persistent=False, live=False):
            # Raise DeviceNotFound if the device isn't found during detach
            try:
                self.detach_device(conf, persistent=persistent, live=live)
                if get_device_conf_func(device) is None:
                    LOG.debug('Successfully detached device %s from guest. '
                              'Persistent? %s. Live? %s',
                              device, persistent, live)
            except libvirt.libvirtError as ex:
                # Translate the libvirt "device not present" error flavours
                # into DeviceNotFound; re-raise anything else untouched.
                with excutils.save_and_reraise_exception(reraise=False) as ctx:
                    errcode = ex.get_error_code()
                    if errcode in (libvirt.VIR_ERR_OPERATION_FAILED,
                                   libvirt.VIR_ERR_INTERNAL_ERROR):
                        errmsg = ex.get_error_message()
                        if 'not found' in errmsg:
                            # This will be raised if the live domain
                            # detach fails because the device is not found
                            raise exception.DeviceNotFound(
                                device=alternative_device_name)
                    elif errcode == libvirt.VIR_ERR_INVALID_ARG:
                        errmsg = ex.get_error_message()
                        if 'no target device' in errmsg:
                            # This will be raised if the persistent domain
                            # detach fails because the device is not found
                            raise exception.DeviceNotFound(
                                device=alternative_device_name)
                    # Re-raise the original exception if we're not raising
                    # DeviceNotFound instead. This will avoid logging of a
                    # "Original exception being dropped" traceback.
                    ctx.reraise = True
        conf = get_device_conf_func(device)
        if conf is None:
            raise exception.DeviceNotFound(device=alternative_device_name)
        persistent = self.has_persistent_configuration()
        LOG.debug('Attempting initial detach for device %s',
                  alternative_device_name)
        try:
            _try_detach_device(conf, persistent, live)
        except exception.DeviceNotFound:
            # NOTE(melwitt): There are effectively two configs for an instance.
            # The persistent config (affects instance upon next boot) and the
            # live config (affects running instance). When we detach a device,
            # we need to detach it from both configs if the instance has a
            # persistent config and a live config. If we tried to detach the
            # device with persistent=True and live=True and it was not found,
            # we should still try to detach from the live config, so continue.
            if persistent and live:
                pass
            else:
                raise
        LOG.debug('Start retrying detach until device %s is gone.',
                  alternative_device_name)
        @loopingcall.RetryDecorator(max_retry_count=max_retry_count,
                                    inc_sleep_time=inc_sleep_time,
                                    max_sleep_time=max_sleep_time,
                                    exceptions=exception.DeviceDetachFailed)
        def _do_wait_and_retry_detach():
            # Returning (instead of raising) ends the retry loop: the device
            # config is gone, i.e. the detach finally succeeded.
            config = get_device_conf_func(device)
            if config is not None:
                # Device is already detached from persistent config
                # and only the live config needs to be updated.
                _try_detach_device(config, persistent=False, live=live)
                reason = _("Unable to detach the device from the live config.")
                raise exception.DeviceDetachFailed(
                    device=alternative_device_name, reason=reason)
        return _do_wait_and_retry_detach
def detach_device(self, conf, persistent=False, live=False):
"""Detaches device to the guest.
:param conf: A LibvirtConfigObject of the device to detach
:param persistent: A bool to indicate whether the change is
persistent or not
:param live: A bool to indicate whether it affect the guest
in running state
"""
flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
device_xml = conf.to_xml()
if six.PY3 and isinstance(device_xml, six.binary_type):
device_xml = device_xml.decode('utf-8')
LOG.debug("detach device xml: %s", device_xml)
self._domain.detachDeviceFlags(device_xml, flags=flags)
def get_xml_desc(self, dump_inactive=False, dump_sensitive=False,
dump_migratable=False):
"""Returns xml description of guest.
:param dump_inactive: Dump inactive domain information
:param dump_sensitive: Dump security sensitive information
:param dump_migratable: Dump XML suitable for migration
:returns string: XML description of the guest
"""
flags = dump_inactive and libvirt.VIR_DOMAIN_XML_INACTIVE or 0
flags |= dump_sensitive and libvirt.VIR_DOMAIN_XML_SECURE or 0
flags |= dump_migratable and libvirt.VIR_DOMAIN_XML_MIGRATABLE or 0
return self._domain.XMLDesc(flags=flags)
def save_memory_state(self):
"""Saves the domain's memory state. Requires running domain.
raises: raises libvirtError on error
"""
self._domain.managedSave(0)
def get_block_device(self, disk):
"""Returns a block device wrapper for disk."""
return BlockDevice(self, disk)
def set_user_password(self, user, new_pass):
"""Configures a new user password."""
self._domain.setUserPassword(user, new_pass, 0)
def _get_domain_info(self):
"""Returns information on Guest.
:returns list: [state, maxMem, memory, nrVirtCpu, cpuTime]
"""
return self._domain.info()
    def get_info(self, host):
        """Retrieve information from libvirt for a specific instance name.

        If a libvirt error is encountered during lookup, we might raise a
        NotFound exception or Error exception depending on how severe the
        libvirt error is.

        :param host: unused here; kept for interface compatibility
        :returns hardware.InstanceInfo:
        :raises exception.InstanceNotFound: if the domain no longer exists
        :raises exception.InternalError: for any other libvirt failure
        """
        try:
            dom_info = self._get_domain_info()
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            # A vanished domain maps to the standard "instance not found"
            # error; everything else is surfaced as an internal error.
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=self.uuid)
            msg = (_('Error from libvirt while getting domain info for '
                     '%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %
                   {'instance_name': self.name,
                    'error_code': error_code,
                    'ex': ex})
            raise exception.InternalError(msg)
        # dom_info[0] is the raw libvirt state; translate it to the
        # hypervisor-independent power state enum.
        return hardware.InstanceInfo(
            state=LIBVIRT_POWER_STATE[dom_info[0]],
            internal_id=self.id)
def get_power_state(self, host):
return self.get_info(host).state
def is_active(self):
"Determines whether guest is currently running."
return self._domain.isActive()
def freeze_filesystems(self):
"""Freeze filesystems within guest."""
self._domain.fsFreeze()
def thaw_filesystems(self):
"""Thaw filesystems within guest."""
self._domain.fsThaw()
def snapshot(self, conf, no_metadata=False,
disk_only=False, reuse_ext=False, quiesce=False):
"""Creates a guest snapshot.
:param conf: libvirt.LibvirtConfigGuestSnapshotDisk
:param no_metadata: Make snapshot without remembering it
:param disk_only: Disk snapshot, no system checkpoint
:param reuse_ext: Reuse any existing external files
:param quiesce: Use QGA to quiece all mounted file systems
"""
flags = no_metadata and (
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA or 0)
flags |= disk_only and (
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY or 0)
flags |= reuse_ext and (
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT or 0)
flags |= quiesce and libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE or 0
device_xml = conf.to_xml()
if six.PY3 and isinstance(device_xml, six.binary_type):
device_xml = device_xml.decode('utf-8')
self._domain.snapshotCreateXML(device_xml, flags=flags)
def shutdown(self):
"""Shutdown guest"""
self._domain.shutdown()
def pause(self):
"""Suspends an active guest
Process is frozen without further access to CPU resources and
I/O but the memory used by the domain at the hypervisor level
will stay allocated.
See method "resume()" to reactive guest.
"""
self._domain.suspend()
    def migrate(self, destination, migrate_uri=None, migrate_disks=None,
                destination_xml=None, flags=0, bandwidth=0):
        """Migrate guest object from its current host to the destination

        :param destination: URI of host destination where guest will be migrate
        :param migrate_uri: URI for invoking the migration
        :param migrate_disks: List of disks to be migrated
        :param destination_xml: The guest XML to be used on the target host
        :param flags: May be one of more of the following:
           VIR_MIGRATE_LIVE Do not pause the VM during migration
           VIR_MIGRATE_PEER2PEER Direct connection between source &
                                 destination hosts
           VIR_MIGRATE_TUNNELLED Tunnel migration data over the
                                 libvirt RPC channel
           VIR_MIGRATE_PERSIST_DEST If the migration is successful,
                                    persist the domain on the
                                    destination host.
           VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful,
                                       undefine the domain on the
                                       source host.
           VIR_MIGRATE_NON_SHARED_INC Migration with non-shared
                                      storage with incremental disk
                                      copy
           VIR_MIGRATE_AUTO_CONVERGE Slow down domain to make sure it does
                                     not change its memory faster than a
                                     hypervisor can transfer the changed
                                     memory to the destination host
           VIR_MIGRATE_POSTCOPY Tell libvirt to enable post-copy migration
           VIR_MIGRATE_TLS Use QEMU-native TLS
        :param bandwidth: The maximum bandwidth in MiB/s
        """
        params = {}
        # In migrateToURI3 these parameters are extracted from the
        # `params` dict
        params['bandwidth'] = bandwidth
        # Only include the optional parameters when actually set; passing
        # empty values through to libvirt is not equivalent.
        if destination_xml:
            params['destination_xml'] = destination_xml
        if migrate_disks:
            params['migrate_disks'] = migrate_disks
        if migrate_uri:
            params['migrate_uri'] = migrate_uri
        # Due to a quirk in the libvirt python bindings,
        # VIR_MIGRATE_NON_SHARED_INC with an empty migrate_disks is
        # interpreted as "block migrate all writable disks" rather than
        # "don't block migrate any disks". This includes attached
        # volumes, which will potentially corrupt data on those
        # volumes. Consequently we need to explicitly unset
        # VIR_MIGRATE_NON_SHARED_INC if there are no disks to be block
        # migrated.
        if (flags & libvirt.VIR_MIGRATE_NON_SHARED_INC != 0 and
                not params.get('migrate_disks')):
            flags &= ~libvirt.VIR_MIGRATE_NON_SHARED_INC
        # In the Python2 libvirt bindings, strings passed to
        # migrateToURI3 via params must not be unicode.
        if six.PY2:
            params = {key: encodeutils.to_utf8(value)
                      if isinstance(value, six.text_type) else value
                      for key, value in params.items()}
        self._domain.migrateToURI3(
            destination, params=params, flags=flags)
def abort_job(self):
"""Requests to abort current background job"""
self._domain.abortJob()
def migrate_configure_max_downtime(self, mstime):
"""Sets maximum time for which domain is allowed to be paused
:param mstime: Downtime in milliseconds.
"""
self._domain.migrateSetMaxDowntime(mstime)
def migrate_start_postcopy(self):
"""Switch running live migration to post-copy mode"""
self._domain.migrateStartPostCopy()
    def get_job_info(self):
        """Get job info for the domain

        Query the libvirt job info for the domain (ie progress
        of migration, or snapshot operation)

        Prefers the modern virDomainGetJobStats API; when either the local
        binding or the remote libvirt lacks it, this permanently falls back
        (via the _have_job_stats class flag) to the old virDomainGetJobInfo.

        :returns: a JobInfo of guest
        """
        if JobInfo._have_job_stats:
            try:
                stats = self._domain.jobStats()
                return JobInfo(**stats)
            except libvirt.libvirtError as ex:
                if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
                    # Remote libvirt doesn't support new API
                    LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
                    JobInfo._have_job_stats = False
                    return JobInfo._get_job_stats_compat(self._domain)
                elif ex.get_error_code() in (
                        libvirt.VIR_ERR_NO_DOMAIN,
                        libvirt.VIR_ERR_OPERATION_INVALID):
                    # Transient guest finished migration, so it has gone
                    # away completely
                    LOG.debug("Domain has shutdown/gone away: %s", ex)
                    return JobInfo(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
                else:
                    LOG.debug("Failed to get job stats: %s", ex)
                    raise
            except AttributeError as ex:
                # Local python binding doesn't support new API
                LOG.debug("Missing local virDomainGetJobStats: %s", ex)
                JobInfo._have_job_stats = False
                return JobInfo._get_job_stats_compat(self._domain)
        else:
            return JobInfo._get_job_stats_compat(self._domain)
class BlockDevice(object):
    """Wrapper around block device API

    Provides disk-level operations (block jobs, rebase, commit, resize,
    stats) for a single disk of a Guest.
    """
    REBASE_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
    COMMIT_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
    def __init__(self, guest, disk):
        # guest: the owning Guest wrapper; disk: the disk identifier passed
        # through to the libvirt block* APIs.
        self._guest = guest
        self._disk = disk
    def abort_job(self, async_=False, pivot=False):
        """Request to cancel a live block device job

        :param async_: Cancel the block device job (e.g. 'copy' or
                       'commit'), and return as soon as possible, without
                       waiting for job completion
        :param pivot: Pivot to the destination image when ending a
                      'copy' or "active commit" (meaning: merging the
                      contents of current active disk into its backing
                      file) job
        """
        flags = async_ and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC or 0
        flags |= pivot and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT or 0
        self._guest._domain.blockJobAbort(self._disk, flags=flags)
    def get_job_info(self):
        """Returns information about job currently running

        :returns: BlockDeviceJobInfo, or None if no job exists
        :raises: libvirt.libvirtError on error fetching block job info
        """
        # libvirt's blockJobInfo() raises libvirt.libvirtError if there was an
        # error. It returns {} if the job no longer exists, or a fully
        # populated dict if the job exists.
        status = self._guest._domain.blockJobInfo(self._disk, flags=0)
        # The job no longer exists
        if not status:
            return None
        return BlockDeviceJobInfo(
            job=status['type'],
            bandwidth=status['bandwidth'],
            cur=status['cur'],
            end=status['end'])
    def rebase(self, base, shallow=False, reuse_ext=False,
               copy=False, relative=False, copy_dev=False):
        """Copy data from backing chain into a new disk

        This copies data from backing file(s) into overlay(s), giving
        control over several aspects like what part of a disk image
        chain to be copied, whether to reuse an existing destination
        file, etc. And updates the backing file to the new disk

        :param base: the new backing file/device (passed straight through
                     to virDomainBlockRebase)
        :param shallow: Limit copy to top of the source backing chain
        :param reuse_ext: Reuse an existing external file that was
                          pre-created
        :param copy: Start a copy job
        :param relative: Keep backing chain referenced using relative names
        :param copy_dev: Treat the destination as type="block"
        """
        flags = shallow and libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW or 0
        flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT or 0
        flags |= copy and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY or 0
        flags |= copy_dev and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_DEV or 0
        flags |= relative and libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE or 0
        return self._guest._domain.blockRebase(
            self._disk, base, self.REBASE_DEFAULT_BANDWIDTH, flags=flags)
    def commit(self, base, top, relative=False):
        """Merge data from overlays into backing file

        This live merges (or "commits") contents from backing files into
        overlays, thus reducing the length of a disk image chain.

        :param base: the backing file to merge into
        :param top: the top image of the subchain to commit
        :param relative: Keep backing chain referenced using relative names
        """
        flags = relative and libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE or 0
        return self._guest._domain.blockCommit(
            self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags)
    def resize(self, size_kb):
        """Resize block device to KiB size"""
        self._guest._domain.blockResize(self._disk, size_kb)
    def is_job_complete(self):
        """Return True if the job is complete, False otherwise

        :returns: True if the job is complete, False otherwise
        :raises: libvirt.libvirtError on error fetching block job info
        """
        # NOTE(mdbooth): This method polls for block job completion. It returns
        # true if either we get a status which indicates completion, or there
        # is no longer a record of the job. Ideally this method and its
        # callers would be rewritten to consume libvirt events from the job.
        # This would provide a couple of advantages. Firstly, as it would no
        # longer be polling it would notice completion immediately rather than
        # at the next 0.5s check, and would also consume fewer resources.
        # Secondly, with the current method we only know that 'no job'
        # indicates completion. It does not necessarily indicate successful
        # completion: the job could have failed, or been cancelled. When
        # polling for block job info we have no way to detect this, so we
        # assume success.
        status = self.get_job_info()
        # If the job no longer exists, it is because it has completed
        # NOTE(mdbooth): See comment above: it may not have succeeded.
        if status is None:
            return True
        # NOTE(slaweq): because of bug in libvirt, which is described in
        # http://www.redhat.com/archives/libvir-list/2016-September/msg00017.html
        # if status.end == 0 job is not started yet so it is not finished
        # NOTE(mdbooth): The fix was committed upstream here:
        # http://libvirt.org/git/?p=libvirt.git;a=commit;h=988218c
        # The earliest tag which contains this commit is v2.3.0-rc1, so we
        # should be able to remove this workaround when MIN_LIBVIRT_VERSION
        # reaches 2.3.0, or we move to handling job events instead.
        # NOTE(lyarwood): Use the mirror element to determine if we can pivot
        # to the new disk once blockjobinfo reports progress as complete.
        if status.end != 0 and status.cur == status.end:
            disk = self._guest.get_disk(self._disk)
            if disk and disk.mirror:
                return disk.mirror.ready == 'yes'
        return False
    def blockStats(self):
        """Extracts block device statistics for a domain"""
        return self._guest._domain.blockStats(self._disk)
class VCPUInfo(object):
    """Plain value object describing a single guest vcpu."""

    def __init__(self, id, cpu, state, time):
        """Structure for information about guest vcpus.

        :param id: The virtual cpu number
        :param cpu: The host cpu currently associated
        :param state: The running state of the vcpu (0 offline, 1 running, 2
                      blocked on resource)
        :param time: The cpu time used in nanoseconds
        """
        self.time = time
        self.state = state
        self.cpu = cpu
        self.id = id
class BlockDeviceJobInfo(object):
    """Plain value object describing a running block device job."""

    def __init__(self, job, bandwidth, cur, end):
        """Structure for information about running job.

        :param job: The running job (0 placeholder, 1 pull,
                    2 copy, 3 commit, 4 active commit)
        :param bandwidth: Used in MiB/s
        :param cur: Indicates the position between 0 and 'end'
        :param end: Indicates the position for this operation
        """
        self.end = end
        self.cur = cur
        self.bandwidth = bandwidth
        self.job = job
class JobInfo(object):
    """Information about libvirt background jobs

    This class encapsulates information about libvirt
    background jobs. It provides a mapping from either
    the old virDomainGetJobInfo API which returned a
    fixed list of fields, or the modern virDomainGetJobStats
    which returns an extendable dict of fields.
    """

    _have_job_stats = True

    def __init__(self, **kwargs):
        """Populate job fields from virDomainGetJobStats-style kwargs.

        Every field not supplied in ``kwargs`` defaults to 0, except the
        job ``type`` which defaults to VIR_DOMAIN_JOB_NONE.
        """
        self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
        # These attributes share their name with the stats dict key.
        same_named = (
            "time_elapsed", "time_remaining", "downtime", "setup_time",
            "data_total", "data_processed", "data_remaining",
            "memory_total", "memory_processed", "memory_remaining",
            "memory_iteration", "memory_constant", "memory_normal",
            "memory_normal_bytes", "memory_bps",
            "disk_total", "disk_processed", "disk_remaining", "disk_bps",
        )
        for attr in same_named:
            setattr(self, attr, kwargs.get(attr, 0))
        # The compression counters use shortened attribute names.
        renamed = (
            ("comp_cache", "compression_cache"),
            ("comp_bytes", "compression_bytes"),
            ("comp_pages", "compression_pages"),
            ("comp_cache_misses", "compression_cache_misses"),
            ("comp_overflow", "compression_overflow"),
        )
        for attr, key in renamed:
            setattr(self, attr, kwargs.get(key, 0))

    @classmethod
    def _get_job_stats_compat(cls, dom):
        # Present the old virDomainGetJobInfo result in the same shape as
        # the modern virDomainGetJobStats dict.
        try:
            info = dom.jobInfo()
        except libvirt.libvirtError as ex:
            # When migration of a transient guest completes the guest goes
            # away entirely (NO_DOMAIN); a persistent guest merely shuts
            # off, which libvirt unhelpfully reports as OPERATION_INVALID.
            # Treat both as a successfully completed job.
            if ex.get_error_code() not in (libvirt.VIR_ERR_NO_DOMAIN,
                                           libvirt.VIR_ERR_OPERATION_INVALID):
                LOG.debug("Failed to get job info: %s", ex)
                raise
            LOG.debug("Domain has shutdown/gone away: %s", ex)
            return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
        return cls(
            type=info[0], time_elapsed=info[1], time_remaining=info[2],
            data_total=info[3], data_processed=info[4],
            data_remaining=info[5], memory_total=info[6],
            memory_processed=info[7], memory_remaining=info[8],
            disk_total=info[9], disk_processed=info[10],
            disk_remaining=info[11])
| {
"content_hash": "242e2173b973e01d23948b8c4d8221eb",
"timestamp": "",
"source": "github",
"line_count": 933,
"max_line_length": 81,
"avg_line_length": 41.264737406216504,
"alnum_prop": 0.5978441558441558,
"repo_name": "rahulunair/nova",
"id": "e8d4d637a22270d2d438d5a3f786294064f53981",
"size": "39461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/guest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22804450"
},
{
"name": "Shell",
"bytes": "41649"
},
{
"name": "Smarty",
"bytes": "472764"
}
],
"symlink_target": ""
} |
from django.conf import settings
def report_exception(exception=None):
    """Forward ``exception`` to Sentry when Sentry reporting is enabled.

    The sentry_sdk import is deferred so the SDK is only required when
    the SENTRY_ACTIVE setting is turned on.
    """
    if not getattr(settings, "SENTRY_ACTIVE", False):
        return
    from sentry_sdk import capture_exception
    capture_exception(exception)
| {
"content_hash": "e581f06b0bc29e925b5699a15dfa264e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 49,
"avg_line_length": 26.25,
"alnum_prop": 0.7285714285714285,
"repo_name": "DXCanas/content-curation",
"id": "aaa3dc10a3b7863e4b023b7159f5b0e8a9509249",
"size": "210",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "contentcuration/contentcuration/utils/sentry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "173955"
},
{
"name": "Dockerfile",
"bytes": "2215"
},
{
"name": "HTML",
"bytes": "503467"
},
{
"name": "JavaScript",
"bytes": "601189"
},
{
"name": "Makefile",
"bytes": "3409"
},
{
"name": "Python",
"bytes": "813881"
},
{
"name": "Shell",
"bytes": "6970"
},
{
"name": "Smarty",
"bytes": "6584"
},
{
"name": "Vue",
"bytes": "21539"
}
],
"symlink_target": ""
} |
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Any, Dict, Optional, Sequence, Tuple, Type, Union, cast
# External imports
from jinja2 import Template
# Bokeh imports
from ..core.templates import AUTOLOAD_JS, AUTOLOAD_TAG, FILE, MACROS, ROOT_DIV
from ..document.document import DEFAULT_TITLE, Document
from ..model import Model
from ..resources import CSSResources, JSResources, Resources
from ..themes import Theme
from .bundle import Script, bundle_for_objs_and_resources
from .elements import html_page_for_render_items, script_for_render_items
from .util import (
FromCurdoc,
OutputDocumentFor,
RenderRoot,
standalone_docs_json,
standalone_docs_json_and_render_items,
)
from .wrappers import wrap_in_onload
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public API of this module.
__all__ = (
    'autoload_static',
    'components',
    'file_html',
    'json_item',
)
# Anything embeddable on its own: a single Model or a whole Document.
ModelLike = Union[Model, Document]
# Collections of embeddable objects may be a sequence or a keyed dict.
ModelLikeCollection = Union[Sequence[ModelLike], Dict[str, ModelLike]]
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# A theme given explicitly, or the FromCurdoc marker meaning "use the theme
# of the current document".
ThemeLike = Union[Theme, Type[FromCurdoc]]
def autoload_static(model: Union[Model, Document], resources: Resources, script_path: str) -> Tuple[str, str]:
    ''' Return JavaScript code and a script tag that can be used to embed
    Bokeh Plots.

    The data for the plot is stored directly in the returned JavaScript code.

    Args:
        model (Model or Document) : object to embed
        resources (Resources) : resource configuration for BokehJS assets
        script_path (str) : path the generated JavaScript will be saved under

    Returns:
        (js, tag) :
            JavaScript code to be saved at ``script_path`` and a ``<script>``
            tag to load it

    Raises:
        ValueError

    '''
    # TODO: maybe warn that it's not exactly useful, but technically possible
    # if resources.mode == 'inline':
    #     raise ValueError("autoload_static() requires non-inline resources")
    if isinstance(model, Model):
        models = [model]
    elif isinstance(model, Document):
        models = model.roots
    else:
        raise ValueError("autoload_static expects a single Model or Document")

    with OutputDocumentFor(models):
        # exactly one render item is expected for a single model/document
        docs_json, (render_item,) = standalone_docs_json_and_render_items([model])

    bundle = bundle_for_objs_and_resources(None, resources)
    bundle.add(Script(script_for_render_items(docs_json, [render_item])))

    # the element id is the value of the single entry in the roots mapping
    elementid = list(render_item.roots.to_json().values())[0]

    js = wrap_in_onload(AUTOLOAD_JS.render(bundle=bundle, elementid=elementid))
    tag = AUTOLOAD_TAG.render(
        src_path=script_path,
        elementid=elementid,
    )

    return js, tag
def components(models: Union[ModelLike, ModelLikeCollection], wrap_script: bool = True,
               wrap_plot_info: bool = True, theme: ThemeLike = FromCurdoc) -> Tuple[str, Any]:
    ''' Return HTML components to embed a Bokeh plot. The data for the plot is
    stored directly in the returned HTML.

    An example can be found in examples/embed/embed_multiple.py

    The returned components assume that BokehJS resources are **already loaded**.
    The html template in which they will be embedded needs to include the following
    scripts tags. The widgets and tables resources are only necessary if the components
    make use of widgets and tables.

    .. code-block:: html

        <script src="https://cdn.bokeh.org/bokeh/release/bokeh-x.y.z.min.js"></script>
        <script src="https://cdn.bokeh.org/bokeh/release/bokeh-widgets-x.y.z.min.js"></script>
        <script src="https://cdn.bokeh.org/bokeh/release/bokeh-tables-x.y.z.min.js"></script>

    Note that in Jupyter Notebooks, it is not possible to use components and show in
    the same notebook cell.

    Args:
        models (Model|list|dict|tuple) :
            A single Model, a list/tuple of Models, or a dictionary of keys and Models.

        wrap_script (boolean, optional) :
            If True, the returned javascript is wrapped in a script tag.
            (default: True)

        wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings.
            Otherwise, return dicts that can be used to build your own divs.
            (default: True)

            If False, the returned dictionary contains the following information:

            .. code-block:: python

                {
                    'modelid':  'The model ID, used with Document.get_model_by_id',
                    'elementid': 'The css identifier the BokehJS will look for to target the plot',
                    'docid': 'Used by Bokeh to find the doc embedded in the returned script',
                }

        theme (Theme, optional) :
            Defaults to the ``Theme`` instance in the current document.
            Setting this to ``None`` uses the default theme or the theme
            already specified in the document. Any other value must be an
            instance of the ``Theme`` class.

    Returns:
        UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])*

    Examples:

        With default wrapping parameter values:

        .. code-block:: python

            components(plot)
            # => (script, plot_div)

            components((plot1, plot2))
            # => (script, (plot1_div, plot2_div))

            components({"Plot 1": plot1, "Plot 2": plot2})
            # => (script, {"Plot 1": plot1_div, "Plot 2": plot2_div})

    Examples:

        With wrapping parameters set to ``False``:

        .. code-block:: python

            components(plot, wrap_script=False, wrap_plot_info=False)
            # => (javascript, plot_dict)

            components((plot1, plot2), wrap_script=False, wrap_plot_info=False)
            # => (javascript, (plot1_dict, plot2_dict))

            components({"Plot 1": plot1, "Plot 2": plot2}, wrap_script=False, wrap_plot_info=False)
            # => (javascript, {"Plot 1": plot1_dict, "Plot 2": plot2_dict})

    '''
    # 1) Convert single items and dicts into list
    # Remember the input shape so the results can be returned in the same
    # shape at the end (single object, dict, or sequence).
    was_single_object = isinstance(models, Model) or isinstance(models, Document)
    models = _check_models_or_docs(models)
    # now convert dict to list, saving keys in the same order
    model_keys = None
    dict_type: Type[Dict[Any, Any]] = dict
    if isinstance(models, dict):
        model_keys = models.keys()
        # preserve the caller's dict subclass (e.g. OrderedDict) on output
        dict_type = models.__class__
        values = []
        # don't just use .values() to ensure we are in the same order as key list
        for k in model_keys:
            values.append(models[k])
        models = values
    # 2) Append models to one document. Either pre-existing or new and render
    with OutputDocumentFor(models, apply_theme=theme):
        (docs_json, [render_item]) = standalone_docs_json_and_render_items(models)
    bundle = bundle_for_objs_and_resources(None, None)
    bundle.add(Script(script_for_render_items(docs_json, [render_item])))
    script = bundle.scripts(tag=wrap_script)
    def div_for_root(root: RenderRoot) -> str:
        return ROOT_DIV.render(root=root, macros=MACROS)
    if wrap_plot_info:
        results = list(div_for_root(root) for root in render_item.roots)
    else:
        results = render_item.roots
    # 3) convert back to the input shape
    result: Any
    if was_single_object:
        result = results[0]
    elif model_keys is not None:
        result = dict_type(zip(model_keys, results))
    else:
        result = tuple(results)
    return script, result
def file_html(models: Union[Model, Document, Sequence[Model]],
              resources: Union[Resources, Tuple[JSResources, CSSResources]],
              title: Optional[str] = None,
              template: Union[Template, str] = FILE,
              template_variables: Dict[str, Any] = {},
              theme: ThemeLike = FromCurdoc,
              suppress_callback_warning: bool = False,
              _always_new: bool = False) -> str:
    ''' Return an HTML document that embeds Bokeh Model or Document objects.

    The data for the plot is stored directly in the returned HTML, with
    support for customizing the JS/CSS resources independently and
    customizing the jinja2 template.

    Args:
        models (Model or Document or seq[Model]) : Bokeh object or objects to render
            typically a Model or Document

        resources (Resources or tuple(JSResources or None, CSSResources or None)) :
            A resource configuration for Bokeh JS & CSS assets.

        title (str, optional) :
            A title for the HTML document ``<title>`` tags or None. (default: None)
            If None, attempt to automatically find the Document title from the given
            plot objects.

        template (Template, optional) : HTML document template (default: FILE)
            A Jinja2 Template, see bokeh.core.templates.FILE for the required
            template parameters

        template_variables (dict, optional) : variables to be used in the Jinja2
            template. If used, the following variable names will be overwritten:
            title, bokeh_js, bokeh_css, plot_script, plot_div

        theme (Theme, optional) :
            Defaults to the ``Theme`` instance in the current document.
            Setting this to ``None`` uses the default theme or the theme
            already specified in the document. Any other value must be an
            instance of the ``Theme`` class.

        suppress_callback_warning (bool, optional) :
            Normally generating standalone HTML from a Bokeh Document that has
            Python callbacks will result in a warning stating that the callbacks
            cannot function. However, this warning can be suppressed by setting
            this value to True (default: False)

    Returns:
        UTF-8 encoded HTML

    '''
    # Normalize the input to a flat sequence of models.
    model_objs: Sequence[Model]
    if isinstance(models, Model):
        model_objs = [models]
    elif isinstance(models, Document):
        model_objs = models.roots
    else:
        model_objs = models

    with OutputDocumentFor(model_objs, apply_theme=theme,
                           always_new=_always_new) as doc:
        (docs_json, render_items) = standalone_docs_json_and_render_items(
            model_objs, suppress_callback_warning=suppress_callback_warning)
        title = _title_from_models(model_objs, title)
        bundle = bundle_for_objs_and_resources([doc], resources)
        return html_page_for_render_items(
            bundle, docs_json, render_items, title=title, template=template,
            template_variables=template_variables)
def json_item(model: Model, target: Optional[str] = None, theme: ThemeLike = FromCurdoc) -> Any: # TODO: TypedDict?
    ''' Return a JSON block that can be used to embed standalone Bokeh content.

    Args:
        model (Model) :
            The Bokeh object to embed

        target (string, optional) :
            A div id to embed the model into. If None, the target id must
            be supplied in the JavaScript call to ``Bokeh.embed.embed_item``.

        theme (Theme, optional) :
            Defaults to the ``Theme`` instance in the current document.
            Setting this to ``None`` uses the default theme or the theme
            already specified in the document. Any other value must be an
            instance of the ``Theme`` class.

    Returns:
        JSON-like

    The returned block is meant to be consumed by the BokehJS function
    ``Bokeh.embed.embed_item``, e.g. a Flask endpoint can respond with
    ``json.dumps(json_item(p, "myplot"))`` and the page can fetch that JSON
    and pass it to ``Bokeh.embed.embed_item``. When ``target`` is omitted
    here, the div id is instead given as the second argument of
    ``Bokeh.embed.embed_item(item, "myplot")``.

    '''
    with OutputDocumentFor([model], apply_theme=theme) as doc:
        doc.title = ""
        docs_json = standalone_docs_json([model])

    # There is exactly one serialized document; use a distinct name rather
    # than rebinding ``doc`` to a different kind of value.
    doc_json = next(iter(docs_json.values()))
    root_id = doc_json['roots']['root_ids'][0]

    return {
        'target_id' : target,
        'root_id'   : root_id,
        'doc'       : doc_json,
    }
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _check_models_or_docs(models: Union[ModelLike, ModelLikeCollection]) -> ModelLikeCollection:
    ''' Normalize and validate input models.

    A bare ``Model`` or ``Document`` is wrapped in a one-element list; a
    sequence of models/documents or a str-keyed dict of models/documents is
    passed through. Anything else raises ``ValueError``.

    '''
    # Promote a single item to a one-element collection.
    if isinstance(models, (Model, Document)):
        models = [models]

    def _all_models(values) -> bool:
        # Every element must itself be a Model or Document.
        return all(isinstance(x, (Model, Document)) for x in values)

    if isinstance(models, Sequence) and _all_models(models):
        return models

    if isinstance(models, dict) and \
            all(isinstance(x, str) for x in models.keys()) and \
            _all_models(models.values()):
        return models

    raise ValueError(
        'Input must be a Model, a Document, a Sequence of Models and Document, or a dictionary from string to Model and Document'
    )
def _title_from_models(models: Sequence[Union[Model, Document]], title: Optional[str]) -> str:
    ''' Resolve a page title for the given models.

    Precedence: an explicit ``title`` override, then the title of any
    ``Document`` in ``models``, then the title of any model's attached
    document, and finally ``DEFAULT_TITLE``.

    '''
    if title is not None:
        return title

    # First preference after the override: a Document listed directly.
    for entry in models:
        if isinstance(entry, Document):
            return entry.title

    # Next: a document attached to one of the models.
    for entry in cast(Sequence[Model], models):
        if entry.document is not None:
            return entry.document.title

    return DEFAULT_TITLE
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "0bac7c3783f0ba8620b695ba267b5d32",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 138,
"avg_line_length": 35.33720930232558,
"alnum_prop": 0.5886804870023034,
"repo_name": "ericmjl/bokeh",
"id": "92caa10a48c89c52e942725104a7fe87624e5a44",
"size": "15526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/embed/standalone.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
} |
"""Sliver manager API.
This module exposes an XMLRPC interface that allows PlanetLab users to
create/destroy slivers with delegated instantiation, start and stop
slivers, make resource loans, and examine resource allocations. The
XMLRPC is provided on a localhost-only TCP port as well as via a Unix
domain socket that is accessible by ssh-ing into a delegate account
with the forward_api_calls shell.
"""
import SimpleXMLRPCServer
import SocketServer
import errno
import os
import pwd
import socket
import struct
import threading
import xmlrpclib
import sys
import database
import tools
from api_calls import *
import logger
# Pull in the site-wide PLC_* configuration when available; fall back to a
# default slice prefix so the API can still start without it.
try:
    sys.path.append("/etc/planetlab")
    from plc_config import *
except Exception:
    # Narrowed from a bare ``except:`` so that SystemExit/KeyboardInterrupt
    # raised while importing the optional config module are not swallowed.
    logger.log("api: Warning: Configuration file /etc/planetlab/plc_config.py not found", 2)
    PLC_SLICE_PREFIX = "pl"
    logger.log("api: Warning: admin slice prefix set to %s" % (PLC_SLICE_PREFIX), 2)

# Localhost-only TCP port and Unix-domain socket where the XMLRPC API listens.
API_SERVER_PORT = 812
UNIX_ADDR = '/tmp/nodemanager.api'
class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
    # overriding _dispatch to achieve this effect is officially deprecated,
    # but I can't figure out how to get access to .request without
    # duplicating SimpleXMLRPCServer code here, which is more likely to
    # change than the deprecated behavior is to be broken
    @database.synchronized
    def _dispatch(self, method_name_unicode, args):
        """Dispatch one XMLRPC call: validate the method name and argument
        count, identify the caller from the socket peer credentials, enforce
        the per-method access policy, then invoke the API function."""
        method_name = str(method_name_unicode)
        try: method = api_method_dict[method_name]
        except KeyError:
            # Unknown method: report the full sorted list of valid choices.
            api_method_list = api_method_dict.keys()
            api_method_list.sort()
            raise xmlrpclib.Fault(100, 'Invalid API method %s. Valid choices are %s' % \
                (method_name, ', '.join(api_method_list)))
        expected_nargs = nargs_dict[method_name]
        if len(args) != expected_nargs:
            raise xmlrpclib.Fault(101, 'Invalid argument count: got %d, expecting %d.' % \
                (len(args), expected_nargs))
        else:
            # Figure out who's calling.
            # XXX - these ought to be imported directly from some .h file
            SO_PEERCRED = 17
            sizeof_struct_ucred = 12
            # struct ucred is (pid, uid, gid); index 1 is the caller's uid,
            # which is mapped to an account name via the passwd database.
            ucred = self.request.getsockopt(socket.SOL_SOCKET, SO_PEERCRED, sizeof_struct_ucred)
            xid = struct.unpack('3i', ucred)[1]
            caller_name = pwd.getpwuid(xid)[0]
            # Special case : the sfa component manager
            if caller_name == PLC_SLICE_PREFIX+"_sfacm":
                try: result = method(*args)
                except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err)
            # Anyone can call these functions
            elif method_name in ('Help', 'Ticket', 'GetXIDs', 'GetSSHKeys'):
                try: result = method(*args)
                except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err)
            else: # Execute anonymous call.
                # Authenticate the caller if not in the above fncts.
                if method_name == "GetRecord":
                    target_name = caller_name
                else:
                    target_name = args[0]
                # Gather target slice's object.
                target_rec = database.db.get(target_name)
                # only work on slivers or self. Sanity check.
                if not (target_rec and target_rec['type'].startswith('sliver.')):
                    raise xmlrpclib.Fault(102, \
                        'Invalid argument: the first argument must be a sliver name.')
                # only manipulate slivers who delegate you authority
                # NOTE(review): this tests caller_name == target_name OR
                # caller_name == target_rec['delegations'] (the whole value).
                # If 'delegations' is a list of names, membership in that list
                # is never checked -- confirm the intended semantics.
                if caller_name in (target_name, target_rec['delegations']):
                    try: result = method(target_rec, *args[1:])
                    except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err)
                else:
                    raise xmlrpclib.Fault(108, '%s: Permission denied.' % caller_name)
        # XMLRPC cannot marshal None; substitute 1 as a generic success value.
        # NOTE(review): `result is None` would be the idiomatic comparison.
        if result == None: result = 1
        return result
# Threaded XMLRPC server bound over TCP; allow_reuse_address avoids bind
# failures on quick restarts while the old socket lingers in TIME_WAIT.
class APIServer_INET(SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer): allow_reuse_address = True
# Same threaded XMLRPC server, but listening on a Unix-domain socket
# (needed so SO_PEERCRED can identify the calling local account).
class APIServer_UNIX(APIServer_INET): address_family = socket.AF_UNIX
def start():
    """Start two XMLRPC interfaces: one bound to localhost, the other bound to a Unix domain socket.

    Both servers are run on daemon threads so they die with the main
    process. The Unix socket path is unlinked first (a stale socket from a
    previous run would otherwise make bind() fail) and then made
    world-accessible so delegate accounts can reach it.
    """
    logger.log('api.start')
    serv1 = APIServer_INET(('127.0.0.1', API_SERVER_PORT), requestHandler=APIRequestHandler, logRequests=0)
    tools.as_daemon_thread(serv1.serve_forever)
    # Remove any stale socket; only ENOENT (nothing to remove) is expected.
    try: os.unlink(UNIX_ADDR)
    except OSError, e:
        if e.errno != errno.ENOENT: raise
    serv2 = APIServer_UNIX(UNIX_ADDR, requestHandler=APIRequestHandler, logRequests=0)
    tools.as_daemon_thread(serv2.serve_forever)
    os.chmod(UNIX_ADDR, 0666)
| {
"content_hash": "8dfc511388ec290f452b600c691635cb",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 116,
"avg_line_length": 42.972972972972975,
"alnum_prop": 0.639832285115304,
"repo_name": "wangyang2013/NodeManager",
"id": "7b5ab95f73d230be7eaad4a7d30f6bec1b267f06",
"size": "4772",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "693"
},
{
"name": "Perl",
"bytes": "617"
},
{
"name": "Python",
"bytes": "307064"
},
{
"name": "Shell",
"bytes": "3931"
}
],
"symlink_target": ""
} |
import time
import os
import sys
import pygame
import PicoBorgRev
# Re-direct our output to standard error, we need to ignore standard out to hide some nasty print statements from pygame
sys.stdout = sys.stderr

# Setup the PicoBorg Reverse motor controller board.
PBR = PicoBorgRev.PicoBorgRev()
#PBR.i2cAddress = 0x44 # Uncomment and change the value if you have changed the board address
PBR.Init()
if not PBR.foundChip:
    # No board answered at the configured address; scan the I2C bus and
    # report any boards that were found before exiting.
    boards = PicoBorgRev.ScanForPicoBorgReverse()
    if len(boards) == 0:
        print 'No PicoBorg Reverse found, check you are attached :)'
    else:
        print 'No PicoBorg Reverse at address %02X, but we did find boards:' % (PBR.i2cAddress)
        for board in boards:
            print ' %02X (%d)' % (board, board)
        print 'If you need to change the I²C address change the setup line so it is correct, e.g.'
        print 'PBR.i2cAddress = 0x%02X' % (boards[0])
    sys.exit()
#PBR.SetEpoIgnore(True) # Uncomment to disable EPO latch, needed if you do not have a switch / jumper
# Clear any latched emergency-stop condition before driving.
PBR.ResetEpo()

# Settings for the joystick
axisUpDown = 1 # Joystick axis to read for up / down position
axisUpDownInverted = False # Set this to True if up and down appear to be swapped
axisLeftRight = 2 # Joystick axis to read for left / right position
axisLeftRightInverted = False # Set this to True if left and right appear to be swapped
buttonResetEpo = 3 # Joystick button number to perform an EPO reset (Start)
buttonSlow = 8 # Joystick button number for driving slowly whilst held (L2)
slowFactor = 0.5 # Speed to slow to when the drive slowly button is held, e.g. 0.5 would be half speed
buttonFastTurn = 9 # Joystick button number for turning fast (R2)
interval = 0.00 # Time between updates in seconds, smaller responds faster but uses more processor time
# NOTE(review): interval of 0.00 makes the control loop spin without any
# pause (time.sleep(0) yields but does not wait) -- confirm this is intended.

# Setup pygame without a display so the script can run headless.
os.environ["SDL_VIDEODRIVER"] = "dummy" # Removes the need to have a GUI window
pygame.init()
pygame.joystick.init()
# Use the first attached joystick.
joystick = pygame.joystick.Joystick(0)
joystick.init()
# Main control loop: poll joystick events, convert stick position to
# per-motor power levels, and stop the motors on exit or CTRL+C.
try:
    print 'Press CTRL+C to quit'
    driveLeft = 0.0
    driveRight = 0.0
    running = True
    hadEvent = False
    upDown = 0.0
    leftRight = 0.0
    # Loop indefinitely
    while running:
        # Get the latest events from the system
        hadEvent = False
        events = pygame.event.get()
        # Handle each event individually
        for event in events:
            if event.type == pygame.QUIT:
                # User exit
                running = False
            elif event.type == pygame.JOYBUTTONDOWN:
                # A button on the joystick just got pushed down
                hadEvent = True
            elif event.type == pygame.JOYAXISMOTION:
                # A joystick has been moved
                hadEvent = True
        if hadEvent:
            # Read axis positions (-1 to +1)
            if axisUpDownInverted:
                upDown = -joystick.get_axis(axisUpDown)
            else:
                upDown = joystick.get_axis(axisUpDown)
            if axisLeftRightInverted:
                leftRight = -joystick.get_axis(axisLeftRight)
            else:
                leftRight = joystick.get_axis(axisLeftRight)
            # Apply steering speeds: halve the turn rate unless the
            # fast-turn button is held.
            if not joystick.get_button(buttonFastTurn):
                leftRight *= 0.5
            # Determine the drive power levels: both motors start at the
            # forward/backward level, then the inside wheel is scaled down
            # in proportion to how far the stick is pushed sideways.
            driveLeft = -upDown
            driveRight = -upDown
            if leftRight < -0.05:
                # Turning left
                driveLeft *= 1.0 + (2.0 * leftRight)
            elif leftRight > 0.05:
                # Turning right
                driveRight *= 1.0 - (2.0 * leftRight)
            # Check for button presses
            if joystick.get_button(buttonResetEpo):
                PBR.ResetEpo()
            if joystick.get_button(buttonSlow):
                driveLeft *= slowFactor
                driveRight *= slowFactor
            # Set the motors to the new speeds
            PBR.SetMotor1(driveLeft)
            PBR.SetMotor2(driveRight)
        # Change the LED to reflect the status of the EPO latch
        PBR.SetLed(PBR.GetEpo())
        # Wait for the interval period
        time.sleep(interval)
    # Disable all drives
    PBR.MotorsOff()
except KeyboardInterrupt:
    # CTRL+C exit, disable all drives
    PBR.MotorsOff()
print
| {
"content_hash": "fc640da67d96fd8c754387e4f6ec03c6",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 127,
"avg_line_length": 42.14545454545455,
"alnum_prop": 0.5787316652286454,
"repo_name": "Steven-Eardley/pibot_motor",
"id": "bdc22a7b64df8a3eca8704786faafda3c145001c",
"size": "4710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "picoborgrev/pbrJoystick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57689"
},
{
"name": "Shell",
"bytes": "1902"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the two endpoint foreign keys (system_A, system_B) to Connection
    and enforce uniqueness plus an index over the pair."""

    dependencies = [
        ('tracker', '0004_auto_20160218_0811'),
    ]

    operations = [
        # PROTECT prevents deleting a System that still has connections.
        # preserve_default=False: default=None is only used to populate
        # existing rows during this migration, not kept on the field.
        migrations.AddField(
            model_name='connection',
            name='system_A',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, related_name='System_A', to='tracker.System'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='connection',
            name='system_B',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, related_name='System_B', to='tracker.System'),
            preserve_default=False,
        ),
        # Set literals replace the legacy set([...]) spelling; the values
        # are identical.
        migrations.AlterUniqueTogether(
            name='connection',
            unique_together={('system_A', 'system_B')},
        ),
        migrations.AlterIndexTogether(
            name='connection',
            index_together={('system_A', 'system_B')},
        ),
    ]
| {
"content_hash": "a0e572398c67e25e3e2711c1e360d4d0",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 141,
"avg_line_length": 32.64705882352941,
"alnum_prop": 0.5954954954954955,
"repo_name": "paftree/WHturk",
"id": "4c2801abc0396f499e5e5dcc40715cf253724d83",
"size": "1182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracker/migrations/0005_auto_20160218_0823.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "412079"
},
{
"name": "HTML",
"bytes": "36344"
},
{
"name": "JavaScript",
"bytes": "1526712"
},
{
"name": "Python",
"bytes": "48695"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Job.email to EmailField(max_length=254) with the verbose name
    'Contact email'."""

    dependencies = [
        ('jobs', '0014_merge'),
    ]

    operations = [
        migrations.AlterField(
            model_name='job',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='Contact email'),
        ),
    ]
| {
"content_hash": "15cf1a35d6018e8b0f0126003908a20d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 21.8125,
"alnum_prop": 0.5702005730659025,
"repo_name": "manhhomienbienthuy/pythondotorg",
"id": "bb4d8427eabb29443ed60af0135247f7716712db",
"size": "349",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "jobs/migrations/0015_auto_20170814_0301.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "HTML",
"bytes": "491673"
},
{
"name": "JavaScript",
"bytes": "20834"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1075699"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "197973"
}
],
"symlink_target": ""
} |
from .base import SourceTestCase
class TestCase(SourceTestCase):
    """Source test for the Excel generator, driven by the bundled
    chinook.xlsx workbook; expected output is chinook_xlsx.json."""

    generator = 'excel'
    output_name = 'chinook_xlsx.json'

    def generate(self):
        # Point a client at the test workbook and run the extraction.
        workbook_uri = self.input_path('chinook.xlsx')
        excel_client = self.module.Client(uri=workbook_uri)
        return excel_client.generate()
| {
"content_hash": "300fdd88236c966e3044fe365f931712",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 45,
"avg_line_length": 25.272727272727273,
"alnum_prop": 0.6618705035971223,
"repo_name": "chop-dbhi/prov-extractor",
"id": "2c28b82789ed87e362c873acc80290fb89a0c079",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sources/excel.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PLpgSQL",
"bytes": "1952523"
},
{
"name": "Python",
"bytes": "96517"
},
{
"name": "Shell",
"bytes": "2268"
}
],
"symlink_target": ""
} |
"""Test Solr search using the synchronizer, i.e. as it would be used by an user
"""
import logging
import os
import time
import sys
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
sys.path[0:0] = [""]
from pymongo import MongoClient
from tests import solr_pair, mongo_host, STRESS_COUNT
from tests.setup_cluster import (start_replica_set,
kill_replica_set,
restart_mongo_proc,
kill_mongo_proc)
from tests.util import assert_soon
from pysolr import Solr, SolrError
from mongo_connector.connector import Connector
from mongo_connector.util import retry_until_ok
from pymongo.errors import OperationFailure, AutoReconnect
class TestSynchronizer(unittest.TestCase):
    """ Tests Solr

    End-to-end tests that run a real mongo-connector Connector against a
    live MongoDB replica set and a live Solr instance, then assert that
    inserts/updates/removes/rollbacks are reflected in the Solr index.
    """

    @classmethod
    def setUpClass(cls):
        # Start a replica set, connect to its primary, and wipe the Solr core.
        _, cls.secondary_p, cls.primary_p = start_replica_set('test-solr')
        cls.conn = MongoClient(mongo_host, cls.primary_p,
                               replicaSet='test-solr')
        cls.solr_conn = Solr('http://%s/solr' % solr_pair)
        cls.solr_conn.delete(q='*:*')

    @classmethod
    def tearDownClass(cls):
        """ Kills cluster instance
        """
        kill_replica_set('test-solr')

    def setUp(self):
        # Each test starts with a fresh oplog checkpoint file, a new
        # Connector, an empty test.test collection and an empty Solr index.
        try:
            os.unlink("config.txt")
        except OSError:
            pass
        open("config.txt", "w").close()
        self.connector = Connector(
            address='%s:%s' % (mongo_host, self.primary_p),
            oplog_checkpoint='config.txt',
            target_url='http://localhost:8983/solr',
            ns_set=['test.test'],
            u_key='_id',
            auth_key=None,
            doc_manager='mongo_connector/doc_managers/solr_doc_manager.py',
            auto_commit_interval=0
        )
        self.connector.start()
        assert_soon(lambda: len(self.connector.shard_set) > 0)
        retry_until_ok(self.conn.test.test.remove)
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 0)

    def tearDown(self):
        # Stop the Connector started in setUp.
        self.connector.join()

    def test_shard_length(self):
        """Tests the shard_length to see if the shard set was recognized
        """
        self.assertEqual(len(self.connector.shard_set), 1)

    def test_insert(self):
        """Tests insert
        """
        self.conn['test']['test'].insert({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search('*:*')) > 0)
        result_set_1 = list(self.solr_conn.search('paulie'))
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        # Solr stores _id as a string; compare against the stringified ObjectId.
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])

    def test_remove(self):
        """Tests remove
        """
        self.conn['test']['test'].insert({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 1)
        self.conn['test']['test'].remove({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 0)

    def test_update(self):
        """Test update operations on Solr.

        Need to have the following defined in schema.xml:

        <field name="a" type="int" indexed="true" stored="true" />
        <field name="b.0.c" type="int" indexed="true" stored="true" />
        <field name="b.10.c" type="int" indexed="true" stored="true" />
        <field name="b.0.e" type="int" indexed="true" stored="true" />
        <field name="b.1.d" type="int" indexed="true" stored="true" />
        <field name="b.1.f" type="int" indexed="true" stored="true" />
        <field name="b.2.e" type="int" indexed="true" stored="true" />
        """
        docman = self.connector.doc_managers[0]

        # Insert
        self.conn.test.test.insert({"a": 0})
        assert_soon(lambda: sum(1 for _ in docman._search("*:*")) == 1)

        def check_update(update_spec):
            # Apply the update in MongoDB and verify the flattened document
            # replicated to Solr matches the flattened MongoDB document.
            updated = self.conn.test.test.find_and_modify(
                {"a": 0},
                update_spec,
                new=True
            )
            # Stringify _id to match what will be retrieved from Solr
            updated['_id'] = str(updated['_id'])
            # Flatten the MongoDB document to match Solr
            updated = docman._clean_doc(updated)
            # Allow some time for update to propagate
            time.sleep(1)
            replicated = list(docman._search("a:0"))[0]
            # Remove add'l fields until these are stored in a separate Solr core
            replicated.pop("_ts")
            replicated.pop("ns")
            # Remove field added by Solr
            replicated.pop("_version_")
            self.assertEqual(replicated, docman._clean_doc(updated))

        # Update by adding a field.
        # Note that Solr can't mix types within an array
        check_update({"$set": {"b": [{"c": 10}, {"d": 11}]}})

        # Update by setting an attribute of a sub-document beyond end of array.
        check_update({"$set": {"b.10.c": 42}})

        # Update by changing a value within a sub-document (contains array)
        check_update({"$inc": {"b.0.c": 1}})

        # Update by changing the value within an array
        check_update({"$inc": {"b.1.f": 12}})

        # Update by adding new bucket to list
        check_update({"$push": {"b": {"e": 12}}})

        # Update by replacing an entire sub-document
        check_update({"$set": {"b.0": {"e": 4}}})

        # Update by adding a sub-document
        check_update({"$set": {"b": {"0": {"c": 100}}}})

        # Update whole document
        check_update({"a": 0, "b": {"1": {"d": 10000}}})

    def test_rollback(self):
        """Tests rollback. We force a rollback by inserting one doc, killing
        primary, adding another doc, killing the new primary, and
        restarting both the servers.
        """
        primary_conn = MongoClient(mongo_host, self.primary_p)

        self.conn['test']['test'].insert({'name': 'paul'})
        assert_soon(
            lambda: self.conn.test.test.find({'name': 'paul'}).count() == 1)
        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 1)
        kill_mongo_proc(self.primary_p, destroy=False)

        # Wait for the old secondary to be elected primary.
        new_primary_conn = MongoClient(mongo_host, self.secondary_p)
        admin_db = new_primary_conn['admin']
        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        retry_until_ok(self.conn.test.test.insert,
                       {'name': 'pauline'})
        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 2)

        result_set_1 = list(self.solr_conn.search('pauline'))
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))

        # Restart the original primary first so it wins the election and the
        # 'pauline' write (never replicated) is rolled back.
        kill_mongo_proc(self.secondary_p, destroy=False)

        restart_mongo_proc(self.primary_p)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        restart_mongo_proc(self.secondary_p)

        time.sleep(2)
        result_set_1 = self.solr_conn.search('pauline')
        self.assertEqual(sum(1 for _ in result_set_1), 0)
        result_set_2 = self.solr_conn.search('paul')
        self.assertEqual(sum(1 for _ in result_set_2), 1)

    def test_stress(self):
        """Test stress by inserting and removing a large amount of docs.
        """
        #stress test
        for i in range(0, STRESS_COUNT):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search(
                '*:*', rows=STRESS_COUNT)) == STRESS_COUNT)
        for i in range(0, STRESS_COUNT):
            result_set_1 = self.solr_conn.search('Paul ' + str(i))
            for item in result_set_1:
                # NOTE(review): this assertion compares a value to itself and
                # can never fail -- presumably it was meant to compare against
                # the MongoDB document's _id; confirm and fix separately.
                self.assertEqual(item['_id'], item['_id'])

    def test_stressed_rollback(self):
        """Test stressed rollback with a large number of documents"""
        for i in range(0, STRESS_COUNT):
            self.conn['test']['test'].insert(
                {'name': 'Paul ' + str(i)})

        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search(
                '*:*', rows=STRESS_COUNT)) == STRESS_COUNT)
        primary_conn = MongoClient(mongo_host, self.primary_p)
        kill_mongo_proc(self.primary_p, destroy=False)

        # Wait for failover to the secondary.
        new_primary_conn = MongoClient(mongo_host, self.secondary_p)
        admin_db = new_primary_conn['admin']

        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)

        # Insert a second batch, retrying on transient errors while the
        # replica set settles; these writes will later be rolled back.
        count = -1
        while count + 1 < STRESS_COUNT:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)})
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        collection_size = self.conn['test']['test'].find().count()
        assert_soon(
            lambda: sum(1 for _ in self.solr_conn.search(
                '*:*', rows=STRESS_COUNT * 2)) == collection_size)
        result_set_1 = self.solr_conn.search(
            'Pauline',
            rows=STRESS_COUNT * 2, sort='_id asc'
        )
        for item in result_set_1:
            result_set_2 = self.conn['test']['test'].find_one(
                {'name': item['name']})
            self.assertEqual(item['_id'], str(result_set_2['_id']))

        # Restart the original primary first so the 'Pauline' batch is
        # rolled back, then bring the secondary back.
        kill_mongo_proc(self.secondary_p, destroy=False)

        restart_mongo_proc(self.primary_p)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        restart_mongo_proc(self.secondary_p)

        assert_soon(lambda: sum(1 for _ in self.solr_conn.search(
            'Pauline', rows=STRESS_COUNT * 2)) == 0)
        result_set_1 = list(self.solr_conn.search(
            'Pauline',
            rows=STRESS_COUNT * 2
        ))
        self.assertEqual(len(result_set_1), 0)
        result_set_2 = list(self.solr_conn.search(
            'Paul',
            rows=STRESS_COUNT * 2
        ))
        self.assertEqual(len(result_set_2), STRESS_COUNT)

    def test_valid_fields(self):
        """ Tests documents with field definitions
        """
        inserted_obj = self.conn['test']['test'].insert(
            {'name': 'test_valid'})
        # 'popularity' is defined in schema.xml, so it should be indexed.
        self.conn['test']['test'].update(
            {'_id': inserted_obj},
            {'$set': {'popularity': 1}}
        )

        docman = self.connector.doc_managers[0]
        assert_soon(lambda: sum(1 for _ in docman._search("*:*")) > 0)
        result = docman.get_last_doc()
        self.assertIn('popularity', result)
        self.assertEqual(sum(1 for _ in docman._search(
            "name=test_valid")), 1)

    def test_invalid_fields(self):
        """ Tests documents without field definitions
        """
        inserted_obj = self.conn['test']['test'].insert(
            {'name': 'test_invalid'})
        # 'break_this_test' has no schema definition; the doc manager should
        # drop it rather than fail the whole document.
        self.conn['test']['test'].update(
            {'_id': inserted_obj},
            {'$set': {'break_this_test': 1}}
        )

        docman = self.connector.doc_managers[0]
        assert_soon(lambda: sum(1 for _ in docman._search("*:*")) > 0)
        result = docman.get_last_doc()
        self.assertNotIn('break_this_test', result)
        self.assertEqual(sum(1 for _ in docman._search(
            "name=test_invalid")), 1)

    def test_dynamic_fields(self):
        """ Tests dynamic field definitions

        The following fields are supplied in the provided schema.xml:

        <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
        <dynamicField name="i_*" type="int" indexed="true" stored="true"/>

        Cases:
        1. Match on first definition
        2. Match on second definition
        3. No match
        """
        self.solr_conn.delete(q='*:*')

        match_first = {"_id": 0, "foo_i": 100}
        match_second = {"_id": 1, "i_foo": 200}
        match_none = {"_id": 2, "foo": 300}

        # Connector is already running
        self.conn["test"]["test"].insert(match_first)
        self.conn["test"]["test"].insert(match_second)
        self.conn["test"]["test"].insert(match_none)

        # Should have documents in Solr now
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) > 0,
                    "Solr doc manager should allow dynamic fields")

        # foo_i and i_foo should be indexed, foo field should not exist
        self.assertEqual(sum(1 for _ in self.solr_conn.search("foo_i:100")), 1)
        self.assertEqual(sum(1 for _ in self.solr_conn.search("i_foo:200")), 1)

        # SolrError: "undefined field foo"
        logger = logging.getLogger("pysolr")
        logger.error("You should see an ERROR log message from pysolr here. "
                     "This indicates success, not an error in the test.")
        with self.assertRaises(SolrError):
            self.solr_conn.search("foo:300")

    def test_nested_fields(self):
        """Test indexing fields that are sub-documents in MongoDB

        The following fields are defined in the provided schema.xml:

        <field name="person.address.street" type="string" ... />
        <field name="person.address.state" type="string" ... />
        <dynamicField name="numbers.*" type="string" ... />
        <dynamicField name="characters.*" type="string" ... />
        """

        # Connector is already running
        self.conn["test"]["test"].insert({
            "name": "Jeb",
            "billing": {
                "address": {
                    "street": "12345 Mariposa Street",
                    "state": "California"
                }
            }
        })
        self.conn["test"]["test"].insert({
            "numbers": ["one", "two", "three"],
            "characters": [
                {"name": "Big Bird",
                 "color": "yellow"},
                {"name": "Elmo",
                 "color": "red"},
                "Cookie Monster"
            ]
        })

        assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) > 0,
                    "documents should have been replicated to Solr")

        # Search for first document
        results = self.solr_conn.search(
            "billing.address.street:12345\ Mariposa\ Street")
        self.assertEqual(len(results), 1)
        self.assertEqual(next(iter(results))["billing.address.state"],
                         "California")

        # Search for second document
        results = self.solr_conn.search(
            "characters.1.color:red")
        self.assertEqual(len(results), 1)
        self.assertEqual(next(iter(results))["numbers.2"], "three")
        results = self.solr_conn.search("characters.2:Cookie\ Monster")
        self.assertEqual(len(results), 1)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "78c04ceb33e921d8cd7cd38991913716",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 80,
"avg_line_length": 37.06112469437653,
"alnum_prop": 0.5494788230637287,
"repo_name": "sat2050/mongo-connector",
"id": "9ca92b626933b96db18e64cb7ac87ac153efc9ae",
"size": "15737",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_solr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "269463"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
# NOTE(review): this class appears to be produced by the plotly validator
# code generator -- presumably changes belong in the generator, not here.
# The ``data_docs`` string below is a runtime default value consumed by the
# CompoundValidator machinery, not a docstring; do not reformat it casually.
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    def __init__(
        self, plotly_name="line", parent_name="scatterternary.marker", **kwargs
    ):
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the compound data class this validator constructs.
            data_class_str=kwargs.pop("data_class_str", "Line"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.line.colorscale`. Has an
                effect only if in `marker.line.color`is set to
                a numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.line.color`) or the bounds set in
                `marker.line.cmin` and `marker.line.cmax` Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Defaults to `false` when
                `marker.line.cmin` and `marker.line.cmax` are
                set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Value should have the
                same units as in `marker.line.color` and if
                set, `marker.line.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.line.cmin` and/or
                `marker.line.cmax` to be equidistant to this
                point. Has an effect only if in
                `marker.line.color`is set to a numerical array.
                Value should have the same units as in
                `marker.line.color`. Has no effect when
                `marker.line.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Value should have the
                same units as in `marker.line.color` and if
                set, `marker.line.cmax` must be set as well.
            color
                Sets themarker.linecolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.line.cmin` and `marker.line.cmax` if
                set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.line.color`is set to a numerical array.
                The colorscale must be an array containing
                arrays mapping a normalized value to an rgb,
                rgba, hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use`marker.line.cmin` and
                `marker.line.cmax`. Alternatively, `colorscale`
                may be a palette name string of the following
                list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
                eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
                body,Earth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for  color .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.line.color`is set to
                a numerical array. If true, `marker.line.cmin`
                will correspond to the last color in the array
                and `marker.line.cmax` will correspond to the
                first color.
            width
                Sets the width (in px) of the lines bounding
                the marker points.
            widthsrc
                Sets the source reference on Chart Studio Cloud
                for  width .
""",
            ),
            **kwargs
        )
| {
"content_hash": "f48462cdbb104a345b637985c13386c9",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 48.471698113207545,
"alnum_prop": 0.5455430128454651,
"repo_name": "plotly/python-api",
"id": "47d6c3278697b4c1ad1d5b2a121248eb6bd02903",
"size": "5138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterternary/marker/_line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import argparse
import sys
from pathlib import Path
import fontTools.designspaceLib
import fontTools.ttLib
import statmake.classes
import statmake.lib
def main(args=None):
if not args:
args = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument(
"stylespace_file", type=Path, help="The path to the Stylespace file."
)
parser.add_argument(
"designspace_file",
type=Path,
help="The path to the Designspace file used to generate the variable font.",
)
parser.add_argument(
"variable_font", type=Path, help="The path to the variable font file."
)
parsed_args = parser.parse_args(args)
stylespace = statmake.classes.Stylespace.from_file(parsed_args.stylespace_file)
designspace = fontTools.designspaceLib.DesignSpaceDocument.fromfile(
parsed_args.designspace_file
)
additional_locations = designspace.lib.get("org.statmake.additionalLocations", {})
font = fontTools.ttLib.TTFont(parsed_args.variable_font)
statmake.lib.apply_stylespace_to_variable_font(
stylespace, font, additional_locations
)
font.save(parsed_args.variable_font)
| {
"content_hash": "1348093e0a4d385c6b1b9530697b8fc1",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 86,
"avg_line_length": 29.525,
"alnum_prop": 0.7027942421676545,
"repo_name": "googlefonts/statmake",
"id": "3ef28edb8ff76f9ab8688a7ca8fbd3b0e47f2777",
"size": "1181",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "statmake/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26715"
}
],
"symlink_target": ""
} |
from authorize import gen_xml as xml, util, base, responses as resp
from authorize.gen_xml import x, AuthorizeSystemError
from authorize.util import request
from authorize.gen_xml import INDIVIDUAL, BUSINESS, ECHECK_CCD, ECHECK_PPD, ECHECK_TEL, ECHECK_WEB
from authorize.gen_xml import BANK, CREDIT_CARD, DAYS_INTERVAL, MONTHS_INTERVAL
from authorize.gen_xml import ACCOUNT_CHECKING, ACCOUNT_SAVINGS
from authorize.gen_xml import ACCOUNT_BUSINESS_CHECKING
class Api(base.BaseApi):
    """
    Main ARB api object.

    Each api call will return a response dictionary in formats similar
    to:

        {'messages': {'message': {'code': {'text_': u'I00001'},
                                  'text': {'text_': u'Successful.'}},
                      'result_code': {'text_': u'Ok'}}}

    with all the possible variations and arguments depending on the
    format specified by Authorize.net at:

        http://www.authorize.net/support/ARB_guide.pdf

    a field in the response can be accesses by using either dictionary
    access methods:

        response['messages']['message']['code']['text_']

    or object dot-notation:

        response.messages.message.code.text_

    NOTE:
    It's important that you make sure that your Authorize dashboard
    uses the same delimiter and encapsulator that you are using in
    your API objects. If you don't check this it could happen that the
    direct_response cannot be parsed even in those cases where it's
    absolutely necessary, like in the AIM API.
    """
    # Response-parsing map for the ARB API, consumed by base.BaseApi.
    responses = resp.arb_map

    # NOTE: methods below take no ``self`` -- the @request decorator supplies
    # the instance and performs the HTTP round trip, using the returned
    # (request_name, kwargs, *xml_body) tuple to build the XML request.
    @request
    def create_subscription(**kw):
        """
        create a payment subscription

        arguments:
            REQUIRED:
                interval_unit: L{DAYS_INTERVAL} or L{MONTHS_INTERVAL}
                interval_length: up to 3 digits, 1-12 for months, 7-365 for days
                start_date: YYYY-MM-DD of type L{unicode}
                amount: L{float} or L{decimal.Decimal}
                profile_type: L{CREDIT_CARD} (default) or L{BANK}
                card_number: L{unicode} or L{int}, required with CREDIT_CARD
                expiration_date: YYYY-MM, required with CREDIT_CARD
                routing_number: 9 digits, required with BANK
                account_number: 5 to 17 digits, required with BANK
                name_on_account: required with BANK
                bill_first_name
                bill_last_name

            OPTIONAL or CONDITIONAL:
                subscription_name: unique name for the subscription
                total_occurrences: up to 4 digits, default 9999
                trial_occurrences: up to 4 digits
                trial_amount: L{float} or L{decimal.Decimal}, must
                    be provided when trial_occurrences is set
                invoice_number:
                description:
                customer_type: L{INDIVIDUAL} or L{BUSINESS}
                customer_id:
                customer_email:
                phone:
                fax:
                driver_number: customer driving license number
                driver_state: license state
                driver_birth: date of birth on the license
                tax_id:
                account_type: L{ACCOUNT_CHECKING} or L{ACCOUNT_SAVINGS}
                    or L{ACCOUNT_BUSINESS_CHECKING}, only with BANK
                bank_name:
                echeck_type: L{ECHECK_CCD} or L{ECHECK_TEL} or
                    L{ECHECK_PPD} or L{ECHECK_WEB}, only with BANK
                ship_first_name:
                ship_last_name:
                bill_company, ship_company:
                bill_address, ship_address:
                bill_city, ship_city:
                bill_state, ship_state:
                bill_zip, ship_zip:
                bill_country, ship_country:
                ship_phone:
                ship_fax:
        """
        return 'ARBCreateSubscriptionRequest', kw, xml.subscription(**kw)

    @request
    def update_subscription(**kw):
        """
        update a payment subscription

        arguments: same as create_subscription plus
            subscription_id: required
        """
        return ('ARBUpdateSubscriptionRequest', kw,
            x.subscriptionId(kw['subscription_id']),
            xml.subscription(**kw)
        )

    @request
    def cancel_subscription(**kw):
        """
        cancel subscription

        arguments:
            subscription_id: required
        """
        return ('ARBCancelSubscriptionRequest', kw,
            x.subscriptionId(kw['subscription_id'])
        )
| {
"content_hash": "9ab53543ceb3b0852cfc6cf62bf59be4",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 98,
"avg_line_length": 36.685483870967744,
"alnum_prop": 0.5832051000219829,
"repo_name": "imtapps/authorize",
"id": "0c36a4e54e9850cf0be046921690912ef6d54532",
"size": "4549",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "authorize/arb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136891"
}
],
"symlink_target": ""
} |
# Public API of the sifork package: the submodules re-exported on star-import.
__all__ = ["experiment", "curve", "segment"]
"content_hash": "0b5cf4543380b38e4f5657708ee37730",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 44,
"avg_line_length": 44,
"alnum_prop": 0.5681818181818182,
"repo_name": "massimovassalli/SingleCellForceSpectroscopy",
"id": "82183f0c8938e39cc5abefabea3bf80585a866ab",
"size": "44",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sifork/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124487"
}
],
"symlink_target": ""
} |
from fauxmo.fauxmo import poller,upnp_broadcast_responder,fauxmo
import os
import time
# Import handler classes
from ir_remote import tv_power_handler,tv_mute_handler
from pianobar import play_workout_handler
# Main script that activates the listener. This polls continuously waiting for a signal
# from the amazon echo. List all devices in TRIGGERS variable and import corresponding handler.
#
# To run, enter "python main.py". I recommending doing inside a
# tmux or screen session so you can detatch.
#
# List all triggers here
# List all triggers here.  Each entry maps the device name the Echo sees to
# the UPnP port to listen on (0 = choose automatically) and the handler
# object invoked when the device is toggled.
TRIGGERS = {"TV": {'port': 52000, 'action': tv_power_handler()},
            "TV Speakers": {'port': 0, 'action': tv_mute_handler()},
            "Basement speakers": {'port': 0, 'action': play_workout_handler()}}

# Main method, sets up devices and continually polls
if __name__ == "__main__":
    print("Setting up")

    # Startup the fauxmo server
    #fauxmo.DEBUG = True
    p = poller()
    u = upnp_broadcast_responder()
    u.init_socket()
    p.add(u)

    # Register the device callback as a fauxmo handler
    for trig, d in TRIGGERS.items():
        fauxmo(trig, u, p, None, d['port'], d['action'])

    # Loop and poll for incoming Echo requests
    print("Entering fauxmo polling loop")
    while True:
        try:
            # Allow time for a ctrl-c to stop the process
            p.poll(100)
            time.sleep(0.1)
        # "except Exception, e" was Python-2-only syntax; "as e" works on
        # Python 2.6+ and 3.  Also fixed the message typo ("har occured")
        # and report the exception instead of silently discarding it.
        except Exception as e:
            print("An error has occurred: %s" % e)
            break
| {
"content_hash": "b46eaa676052f029f87114fd3d833ed4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 96,
"avg_line_length": 33.111111111111114,
"alnum_prop": 0.6416107382550336,
"repo_name": "markveillette/smarthome",
"id": "4348587e92ac365762a2172d08a7785231e931fd",
"size": "1512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21032"
},
{
"name": "Shell",
"bytes": "388"
}
],
"symlink_target": ""
} |
from collections import namedtuple, Counter
__all__ = ('Field', 'namedbuffer', 'buffer_for',)
Field = namedtuple(
'Field',
('name', 'size_bytes', 'format_string', 'encoder'),
)
Pad = namedtuple(
'Pad',
('size_bytes', 'format_string'),
)
def make_field(name, size_bytes, format_string, encoder=None):
if size_bytes < 0:
raise ValueError('negative size_bytes')
return Field(
name,
size_bytes,
format_string,
encoder,
)
def pad(bytes):
return Pad(
bytes,
'{}x'.format(bytes),
)
def buffer_for(klass):
""" Returns a new buffer of the appropriate size for klass. """
return bytearray(klass.size)
def compute_slices(fields_spec):
names_slices = dict()
start = 0
for field in fields_spec:
end = start + field.size_bytes
if not isinstance(field, Pad): # do not create slices for paddings
names_slices[field.name] = slice(start, end)
start = end
return names_slices
def namedbuffer(buffer_name, fields_spec):  # noqa (ignore ciclomatic complexity)
    """ Class factory, returns a class to wrap a buffer instance and expose the
    data as fields.

    The field spec specifies how many bytes should be used for a field and what
    is the encoding / decoding function.
    """
    # pylint: disable=protected-access,unused-argument

    if not buffer_name:
        raise ValueError('buffer_name is empty')

    if not fields_spec:
        raise ValueError('fields_spec is empty')

    fields = [
        field
        for field in fields_spec
        if not isinstance(field, Pad)
    ]

    if any(field.size_bytes < 0 for field in fields):
        raise ValueError('negative size_bytes')

    # BUG FIX: the original tested ``len(field.name) < 0``, which can never be
    # true, so nameless fields were silently accepted. Reject empty names.
    if any(len(field.name) == 0 for field in fields):
        raise ValueError('field missing name')

    names_fields = {
        field.name: field
        for field in fields
    }

    if 'data' in names_fields:
        raise ValueError('data field shadowing underlying buffer')

    if any(count > 1 for count in Counter(field.name for field in fields).values()):
        raise ValueError('repeated field name')

    # big endian format
    fields_format = '>' + ''.join(field.format_string for field in fields_spec)
    size = sum(field.size_bytes for field in fields_spec)
    names_slices = compute_slices(fields_spec)
    sorted_names = sorted(names_fields.keys())

    def __init__(self, data):
        if len(data) != size:
            raise ValueError('data buffer has the wrong size, expected {}'.format(size))

        object.__setattr__(self, 'data', data)

    # Intentionally exposing only the attributes from the spec, since the idea
    # is for the instance to expose the underlying buffer as attributes
    def __getattribute__(self, name):
        if name in names_slices:
            slice_ = names_slices[name]
            field = names_fields[name]

            data = object.__getattribute__(self, 'data')
            value = data[slice_]

            if field.encoder:
                value = field.encoder.decode(value)

            return value

        if name == 'data':
            return object.__getattribute__(self, 'data')

        # Include the attribute name in the error for easier debugging
        # (the original raised a bare AttributeError with no message).
        raise AttributeError(name)

    def __setattr__(self, name, value):
        if name in names_slices:
            slice_ = names_slices[name]
            field = names_fields[name]

            if field.encoder:
                field.encoder.validate(value)
                value = field.encoder.encode(value, field.size_bytes)

            length = len(value)
            if length > field.size_bytes:
                msg = 'value with length {length} for {attr} is too big'.format(
                    length=length,
                    attr=name,
                )
                raise ValueError(msg)
            elif length < field.size_bytes:
                # Left-pad short values with zero bytes (big-endian layout).
                pad_size = field.size_bytes - length
                pad_value = b'\x00' * pad_size
                value = pad_value + value

            data = object.__getattribute__(self, 'data')
            data[slice_] = value
        else:
            super(self.__class__, self).__setattr__(name, value)

    def __repr__(self):
        return '<{} [...]>'.format(buffer_name)

    def __len__(self):
        return size

    def __dir__(self):
        return sorted_names

    attributes = {
        '__init__': __init__,
        '__slots__': ('data',),
        '__getattribute__': __getattribute__,
        '__setattr__': __setattr__,
        '__repr__': __repr__,
        '__len__': __len__,
        '__dir__': __dir__,

        # These are class attributes hidden from instance, i.e. must be
        # accessed through the class instance.
        'fields_spec': fields_spec,
        'format': fields_format,
        'size': size,
    }

    return type(buffer_name, (), attributes)
| {
"content_hash": "6143727b79c15aa9bf4c3c7781193ac0",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 88,
"avg_line_length": 27.502824858757062,
"alnum_prop": 0.5708709942481512,
"repo_name": "charles-cooper/raiden",
"id": "e1bc0e8b7078d4f1890756501f088b4b52a65dea",
"size": "4892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raiden/encoding/format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5202"
},
{
"name": "Python",
"bytes": "952454"
},
{
"name": "Shell",
"bytes": "4384"
}
],
"symlink_target": ""
} |
# Fixture-loader spec for the "group" table: rows come from group.csv and are
# inserted only when they do not already exist.
GroupData = {
    'options': ['insert if not exists'],
    'csv': 'group.csv',
}
| {
"content_hash": "ff0180caedd7edb5e4aa4d3a0605585d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 40,
"avg_line_length": 21.25,
"alnum_prop": 0.5176470588235295,
"repo_name": "aagusti/o-sipkd",
"id": "d7a768cd454c99cd2906fb71c1458cb2eaf0b621",
"size": "85",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "osipkd/scripts/data/group.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "107334"
},
{
"name": "HTML",
"bytes": "1317001"
},
{
"name": "JavaScript",
"bytes": "983058"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "PLpgSQL",
"bytes": "2828"
},
{
"name": "Python",
"bytes": "615248"
},
{
"name": "Shell",
"bytes": "241"
},
{
"name": "Smarty",
"bytes": "2003"
}
],
"symlink_target": ""
} |
"""S3/CloudFront Let's Encrypt installer plugin."""
from __future__ import print_function
import os
import sys
import logging
import time
import zope.interface
import boto3
import botocore
from certbot import interfaces
from certbot.plugins import common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IInstaller)
@zope.interface.provider(interfaces.IPluginFactory)
class Installer(common.Plugin):
    """Certbot installer that uploads new certificates to IAM and attaches
    them to a CloudFront distribution, deleting superseded ones."""

    description = "S3/CloudFront Installer"

    @classmethod
    def add_parser_arguments(cls, add):
        add("cf-distribution-id", default=os.getenv('CF_DISTRIBUTION_ID'),
            help="CloudFront distribution id")

    def __init__(self, *args, **kwargs):
        # Set by deploy_cert(); consumed later by save() to update CloudFront
        # and to delete older certificates for the same domain.
        self.certificate_main_domain = None
        self.certificate_id = None
        super(Installer, self).__init__(*args, **kwargs)

    def prepare(self):  # pylint: disable=missing-docstring,no-self-use
        pass  # pragma: no cover

    def more_info(self):  # pylint: disable=missing-docstring,no-self-use
        return ""

    def get_all_names(self):  # pylint: disable=missing-docstring,no-self-use
        pass  # pragma: no cover

    def deploy_cert(self, domain, cert_path, key_path, chain_path, fullchain_path):
        """
        Upload Certificate to IAM
        """
        # Only install a certificate once
        if self.certificate_id is not None:
            return

        if self.config.rsa_key_size > 2048:
            logger.error(
                "The maximum public key size allowed for Cloudfront is 2048 ("
                "https://docs.aws.amazon.com/AmazonCloudFront/latest"
                "/DeveloperGuide/cnames-and-https-requirements.html)\n"
                "Please, use --rsa_key_size 2048 or edit your cli.ini")
            sys.exit(1)

        client = boto3.client('iam')

        name = "le-%s" % domain
        # Read the PEM files with context managers so the handles are closed
        # promptly (the original leaked them via bare open(...).read()).
        with open(cert_path) as cert_file:
            body = cert_file.read()
        with open(key_path) as key_file:
            key = key_file.read()
        with open(chain_path) as chain_file:
            chain = chain_file.read()

        # Timestamp suffix keeps repeated uploads from colliding on the name.
        suffix = "-%i" % int(time.time())

        # Upload cert to IAM
        response = client.upload_server_certificate(
            Path="/cloudfront/letsencrypt/",
            ServerCertificateName=name + suffix,
            CertificateBody=body,
            PrivateKey=key,
            CertificateChain=chain
        )
        self.certificate_id = response['ServerCertificateMetadata']['ServerCertificateId']
        self.certificate_main_domain = domain

    def enhance(self, domain, enhancement, options=None):  # pylint: disable=missing-docstring,no-self-use
        pass  # pragma: no cover

    def supported_enhancements(self):  # pylint: disable=missing-docstring,no-self-use
        return []  # pragma: no cover

    def get_all_certs_keys(self):  # pylint: disable=missing-docstring,no-self-use
        pass  # pragma: no cover

    def save(self, title=None, temporary=False):  # pylint: disable=no-self-use
        """
        Save the Cloudfront config if needed
        """
        if self.certificate_id is None:
            # Nothing to save
            return

        client = boto3.client('iam')
        cf_client = boto3.client('cloudfront')

        # Update CloudFront config to use the new one
        cf_cfg = cf_client.get_distribution_config(Id=self.conf('cf-distribution-id'))

        # If we already have the right certificate, then don't change the config.
        if ('IAMCertificateId' in cf_cfg['DistributionConfig']['ViewerCertificate'] and
                cf_cfg['DistributionConfig']['ViewerCertificate']['IAMCertificateId'] == self.certificate_id):
            return

        cf_cfg['DistributionConfig']['ViewerCertificate']['IAMCertificateId'] = self.certificate_id
        cf_cfg['DistributionConfig']['ViewerCertificate']['Certificate'] = self.certificate_id
        cf_cfg['DistributionConfig']['ViewerCertificate']['CertificateSource'] = 'iam'

        # Set the default mode to SNI-only to avoid surprise charges
        if 'SSLSupportMethod' not in cf_cfg['DistributionConfig']['ViewerCertificate']:
            cf_cfg['DistributionConfig']['ViewerCertificate']['SSLSupportMethod'] = 'sni-only'
            cf_cfg['DistributionConfig']['ViewerCertificate']['MinimumProtocolVersion'] = 'TLSv1.2_2018'

        # Remove mutually-exclusive certificate sources, if present.
        try:
            cf_cfg['DistributionConfig']['ViewerCertificate'].pop('CloudFrontDefaultCertificate')
        except KeyError:
            pass
        try:
            cf_cfg['DistributionConfig']['ViewerCertificate'].pop('ACMCertificateArn')
        except KeyError:
            pass

        response = cf_client.update_distribution(DistributionConfig=cf_cfg['DistributionConfig'],
                                                 Id=self.conf('cf-distribution-id'),
                                                 IfMatch=cf_cfg['ETag'])
        # TODO check response

        # Delete old certs for the same domain, keeping the one just deployed.
        certificates = client.list_server_certificates(
            PathPrefix="/cloudfront/letsencrypt/"
        )
        for cert in certificates['ServerCertificateMetadataList']:
            if (self.certificate_main_domain in cert['ServerCertificateName'] and
                    cert['ServerCertificateId'] != self.certificate_id):
                try:
                    client.delete_server_certificate(
                        ServerCertificateName=cert['ServerCertificateName']
                    )
                except botocore.exceptions.ClientError as e:
                    # Best-effort cleanup: log and keep going.
                    logger.error(e)

    def rollback_checkpoints(self, rollback=1):  # pylint: disable=missing-docstring,no-self-use
        pass  # pragma: no cover

    def recovery_routine(self):  # pylint: disable=missing-docstring,no-self-use
        pass  # pragma: no cover

    def view_config_changes(self):  # pylint: disable=missing-docstring,no-self-use
        pass  # pragma: no cover

    def config_test(self):  # pylint: disable=missing-docstring,no-self-use
        pass  # pragma: no cover

    def restart(self):  # pylint: disable=missing-docstring,no-self-use
        pass  # pragma: no cover

    def renew_deploy(self, lineage, *args, **kwargs):  # pylint: disable=missing-docstring,no-self-use
        """
        Renew certificates when calling `certbot renew`
        """
        # Run deploy_cert with the lineage params
        self.deploy_cert(lineage.names()[0], lineage.cert_path, lineage.key_path, lineage.chain_path, lineage.fullchain_path)
        return


interfaces.RenewDeployer.register(Installer)
| {
"content_hash": "693c23f64acec44c9930d162bf05718d",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 125,
"avg_line_length": 36.857142857142854,
"alnum_prop": 0.6288372093023256,
"repo_name": "dlapiduz/letsencrypt-s3front",
"id": "405f01aa0764b1cca43e3dc1136116e9c9a81b1e",
"size": "6450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "certbot_s3front/installer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9712"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import glob
import argparse
def convert_execute_and_sync_script(script_path):
    """Convert a jupytext-paired .py script to a notebook, execute it, render
    it, and sync the executed result back to the script.

    script_path: path to the paired ``.py`` script.
    """
    def run(cmd):
        # shell=True expects a single command string; wrapping it in a list
        # is only honored on POSIX and breaks on Windows.
        subprocess.run(cmd, shell=True)

    # Replace only the trailing extension.  The original used
    # script_path.replace('py', 'ipynb'), which rewrites *every* "py"
    # substring in the path (e.g. "pytools/demo.py" -> "ipynbtools/...").
    notebook_path = script_path.rsplit('.', 1)[0] + '.ipynb'

    run(f"jupytext --update --to notebook {script_path}")
    run(f"jupyter nbconvert "
        f"--ExecutePreprocessor.allow_errors=True "
        f"--ExecutePreprocessor.timeout=1000 "
        f"--ExecutePreprocessor.iopub_timeout=5 "
        f"--to notebook --execute --inplace {notebook_path}")
    run(f"jupyter nbconvert {notebook_path}")
    run(f"jupytext --sync {script_path}")
def parse_arguments(argv=None):
    """Parse command-line arguments.

    argv: optional list of argument strings; when None (the default, and the
    behavior existing callers rely on) argparse reads ``sys.argv[1:]``.
    Returns a namespace with ``script_path``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('script_path', type=str)
    return parser.parse_args(argv)
if __name__ == "__main__":
    # "all" rebuilds every example and tutorial notebook; otherwise only the
    # single script given on the command line is processed.
    args = parse_arguments()
    if args.script_path == 'all':
        example_scripts = glob.glob("examples/**/*.py", recursive=True)
        for _example_script in example_scripts:
            convert_execute_and_sync_script(_example_script)
        tutorial_scripts = glob.glob("tutorials/**/*.py", recursive=True)
        for _tutorial_script in tutorial_scripts:
            convert_execute_and_sync_script(_tutorial_script)
    else:
        convert_execute_and_sync_script(args.script_path)
| {
"content_hash": "cac7fab7f46db92b85d729a348750e41",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 31.093023255813954,
"alnum_prop": 0.6477187733732236,
"repo_name": "interactiveaudiolab/nussl",
"id": "57ab55ef2fc068ead39cfc59a676de247580402b",
"size": "1337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/create_and_execute_notebook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "11692"
},
{
"name": "Python",
"bytes": "591205"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
} |
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
class PlanetPlugin(CMSPluginBase):
    """Test plugin whose contents are never rendered -- only the toolbar
    entries are.  Valid solely as a child of a SolarSystemPlugin."""

    render_template = 'nested_plugins_app/planet.html'
    require_parent = True
    parent_classes = ['SolarSystemPlugin']
class SolarSystemPlugin(CMSPluginBase):
    """Container test plugin that accepts only PlanetPlugin children."""

    child_classes = ['PlanetPlugin']
    render_template = 'nested_plugins_app/solar_system.html'
    allow_children = True
# Register both test plugins with the CMS plugin registry so they are
# discoverable by the test project.
plugin_pool.register_plugin(PlanetPlugin)
plugin_pool.register_plugin(SolarSystemPlugin)
| {
"content_hash": "ff6b6d132b9f818f9c662321d3757998",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 60,
"avg_line_length": 27.5,
"alnum_prop": 0.743801652892562,
"repo_name": "datakortet/django-cms",
"id": "4d75c5238974628146a351b9e38b818b0b8e62ca",
"size": "605",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "cms/test_utils/project/nested_plugins_app/cms_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "203975"
},
{
"name": "JavaScript",
"bytes": "1249081"
},
{
"name": "Python",
"bytes": "2374270"
},
{
"name": "SCSS",
"bytes": "137720"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
} |
import email
import os.path
import time
from django.conf import settings
from django.test import TestCase
from django_mailbox import models, utils
from django_mailbox.models import Mailbox, Message
class EmailIntegrationTimeout(Exception):
    """Raised when no new mail arrives within the polling deadline."""
def get_email_as_text(name):
    """Return the raw bytes of the named fixture message from the package's
    ``messages`` directory."""
    fixture_path = os.path.join(os.path.dirname(__file__), 'messages', name)
    with open(fixture_path, 'rb') as fixture:
        return fixture.read()
class EmailMessageTestCase(TestCase):
    """Base TestCase wiring up a Mailbox fixture, SMTP settings from the
    environment, and deep e-mail comparison helpers."""

    # Headers that may legitimately appear on one side of a comparison only.
    ALLOWED_EXTRA_HEADERS = [
        'MIME-Version',
        'Content-Transfer-Encoding',
    ]

    def setUp(self):
        dm_settings = utils.get_settings()

        # Saved so tearDown can restore the module-level settings.
        self._ALLOWED_MIMETYPES = dm_settings['allowed_mimetypes']
        self._STRIP_UNALLOWED_MIMETYPES = (
            dm_settings['strip_unallowed_mimetypes']
        )
        self._TEXT_STORED_MIMETYPES = dm_settings['text_stored_mimetypes']

        self.mailbox = Mailbox.objects.create(from_email='from@example.com')

        # Integration-test credentials come from the environment.
        self.test_account = os.environ.get('EMAIL_ACCOUNT')
        self.test_password = os.environ.get('EMAIL_PASSWORD')
        self.test_smtp_server = os.environ.get('EMAIL_SMTP_SERVER')
        self.test_from_email = 'nobody@nowhere.com'
        self.maximum_wait_seconds = 60 * 5

        settings.EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
        settings.EMAIL_HOST = self.test_smtp_server
        settings.EMAIL_PORT = 587
        settings.EMAIL_HOST_USER = self.test_account
        settings.EMAIL_HOST_PASSWORD = self.test_password
        settings.EMAIL_USE_TLS = True
        super().setUp()

    def _get_new_messages(self, mailbox, condition=None):
        """Yield newly-received messages, polling every 5 seconds until at
        least one arrives; raise EmailIntegrationTimeout at the deadline."""
        start_time = time.time()
        # wait until there is at least one message
        while time.time() - start_time < self.maximum_wait_seconds:
            messages = self.mailbox.get_new_mail(condition)
            try:
                # check if generator contains at least one element
                message = next(messages)
                yield message
                yield from messages
                return
            except StopIteration:
                time.sleep(5)
        raise EmailIntegrationTimeout()

    def _get_email_as_text(self, name):
        """Return the raw bytes of the named fixture message."""
        with open(
            os.path.join(
                os.path.dirname(__file__),
                'messages',
                name,
            ),
            'rb'
        ) as f:
            return f.read()

    def _get_email_object(self, name):
        """Parse the named fixture into an email.message.Message."""
        copy = self._get_email_as_text(name)
        return email.message_from_bytes(copy)

    def _headers_identical(self, left, right, header=None):
        """ Check if headers are (close enough to) identical.

         * This is particularly tricky because Python 2.6, Python 2.7 and
           Python 3 each handle header strings slightly differently.  This
           should mash away all of the differences, though.
         * This also has a small loophole in that when re-writing e-mail
           payload encodings, we re-build the Content-Type header, so if the
           header was originally unquoted, it will be quoted when rehydrating
           the e-mail message.
        """
        if header.lower() == 'content-type':
            # Special case; given that we re-write the header, we'll be quoting
            # the new content type; we need to make sure that doesn't cause
            # this comparison to fail.  Also, the case of the encoding could
            # be changed, etc. etc. etc.
            left = left.replace('"', '').upper()
            right = right.replace('"', '').upper()

        # Normalize header folding whitespace before comparing.
        left = left.replace('\n\t', ' ').replace('\n ', ' ')
        right = right.replace('\n\t', ' ').replace('\n ', ' ')

        if right != left:
            return False

        return True

    def compare_email_objects(self, left, right):
        """Recursively assert two Message objects are equivalent, allowing
        the headers in ALLOWED_EXTRA_HEADERS to differ."""
        # Compare headers
        for key, value in left.items():
            if not right[key] and key in self.ALLOWED_EXTRA_HEADERS:
                continue
            if not right[key]:
                raise AssertionError("Extra header '%s'" % key)
            if not self._headers_identical(right[key], value, header=key):
                raise AssertionError(
                    "Header '{}' unequal:\n{}\n{}".format(
                        key,
                        repr(value),
                        repr(right[key]),
                    )
                )
        for key, value in right.items():
            if not left[key] and key in self.ALLOWED_EXTRA_HEADERS:
                continue
            if not left[key]:
                raise AssertionError("Extra header '%s'" % key)
            if not self._headers_identical(left[key], value, header=key):
                raise AssertionError(
                    "Header '{}' unequal:\n{}\n{}".format(
                        key,
                        repr(value),
                        repr(right[key]),
                    )
                )
        if left.is_multipart() != right.is_multipart():
            self._raise_mismatched(left, right)
        if left.is_multipart():
            left_payloads = left.get_payload()
            right_payloads = right.get_payload()
            if len(left_payloads) != len(right_payloads):
                self._raise_mismatched(left, right)
            for n in range(len(left_payloads)):
                self.compare_email_objects(
                    left_payloads[n],
                    right_payloads[n]
                )
        else:
            if left.get_payload() is None or right.get_payload() is None:
                # BUG FIX: the original compared the *bound method*
                # ``get_payload`` to None (always "is not None"), which made
                # two messages that both carry a None payload compare as
                # mismatched.  Compare the payload values instead, raising
                # only when exactly one side is None.
                if left.get_payload() is None and right.get_payload() is not None:
                    self._raise_mismatched(left, right)
                if right.get_payload() is None and left.get_payload() is not None:
                    self._raise_mismatched(left, right)
            elif left.get_payload().strip() != right.get_payload().strip():
                self._raise_mismatched(left, right)

    def _raise_mismatched(self, left, right):
        """Fail with both messages rendered for debugging."""
        raise AssertionError(
            "Message payloads do not match:\n{}\n{}".format(
                left.as_string(),
                right.as_string()
            )
        )

    def assertEqual(self, left, right):  # noqa: N802
        # Route Message instances through the deep comparison; everything
        # else uses the normal TestCase equality.
        if not isinstance(left, email.message.Message):
            return super().assertEqual(left, right)
        return self.compare_email_objects(left, right)

    def tearDown(self):
        for message in Message.objects.all():
            message.delete()
        # Restore the module-level settings captured in setUp.
        models.ALLOWED_MIMETYPES = self._ALLOWED_MIMETYPES
        models.STRIP_UNALLOWED_MIMETYPES = self._STRIP_UNALLOWED_MIMETYPES
        models.TEXT_STORED_MIMETYPES = self._TEXT_STORED_MIMETYPES
        self.mailbox.delete()
        super().tearDown()
| {
"content_hash": "aefd0fdda37974d056427c017563a584",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 79,
"avg_line_length": 35.92670157068063,
"alnum_prop": 0.5504226173127368,
"repo_name": "ad-m/django-mailbox",
"id": "b1215c5e326689eb1deeaa4859152fa5b8f48552",
"size": "6862",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_mailbox/tests/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "145934"
}
],
"symlink_target": ""
} |
import time
from typing import Any, Dict, List, Optional, Union
from datadog import api, initialize
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class DatadogHook(BaseHook, LoggingMixin):
    """
    Uses datadog API to send metrics of practically anything measurable,
    so it's possible to track # of db records inserted/deleted, records read
    from file and many other useful metrics.
    Depends on the datadog API, which has to be deployed on the same server where
    Airflow runs.
    :param datadog_conn_id: The connection to datadog, containing metadata for api keys.
    :type datadog_conn_id: str
    """
    def __init__(self, datadog_conn_id: str = 'datadog_default') -> None:
        super().__init__()
        conn = self.get_connection(datadog_conn_id)
        # API/app keys and the optional source type live in the connection's
        # "extra" JSON blob.
        self.api_key = conn.extra_dejson.get('api_key', None)
        self.app_key = conn.extra_dejson.get('app_key', None)
        self.source_type_name = conn.extra_dejson.get('source_type_name', None)
        # If the host is populated, it will use that hostname instead.
        # for all metric submissions.
        self.host = conn.host
        if self.api_key is None:
            raise AirflowException("api_key must be specified in the Datadog connection details")
        self.log.info("Setting up api keys for Datadog")
        initialize(api_key=self.api_key, app_key=self.app_key)
    def validate_response(self, response: Dict[str, Any]) -> None:
        """Validate a Datadog API response; raise AirflowException unless its
        status field is 'ok'."""
        if response['status'] != 'ok':
            self.log.error("Datadog returned: %s", response)
            raise AirflowException("Error status received from Datadog")
    def send_metric(
        self,
        metric_name: str,
        datapoint: Union[float, int],
        tags: Optional[List[str]] = None,
        type_: Optional[str] = None,
        interval: Optional[int] = None,
    ) -> Dict[str, Any]:
        """
        Sends a single datapoint metric to DataDog
        :param metric_name: The name of the metric
        :type metric_name: str
        :param datapoint: A single integer or float related to the metric
        :type datapoint: int or float
        :param tags: A list of tags associated with the metric
        :type tags: list
        :param type_: Type of your metric: gauge, rate, or count
        :type type_: str
        :param interval: If the type of the metric is rate or count, define the corresponding interval
        :type interval: int
        :return: the validated Datadog API response
        :rtype: dict
        """
        response = api.Metric.send(
            metric=metric_name, points=datapoint, host=self.host, tags=tags, type=type_, interval=interval
        )
        self.validate_response(response)
        return response
    def query_metric(self, query: str, from_seconds_ago: int, to_seconds_ago: int) -> Dict[str, Any]:
        """
        Queries datadog for a specific metric, potentially with some
        function applied to it and returns the results.
        :param query: The datadog query to execute (see datadog docs)
        :type query: str
        :param from_seconds_ago: How many seconds ago to start querying for.
        :type from_seconds_ago: int
        :param to_seconds_ago: Up to how many seconds ago to query for.
        :type to_seconds_ago: int
        :return: the validated Datadog API response
        :rtype: dict
        """
        now = int(time.time())
        response = api.Metric.query(start=now - from_seconds_ago, end=now - to_seconds_ago, query=query)
        self.validate_response(response)
        return response
    # pylint: disable=too-many-arguments
    def post_event(
        self,
        title: str,
        text: str,
        aggregation_key: Optional[str] = None,
        alert_type: Optional[str] = None,
        date_happened: Optional[int] = None,
        handle: Optional[str] = None,
        priority: Optional[str] = None,
        related_event_id: Optional[int] = None,
        tags: Optional[List[str]] = None,
        device_name: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """
        Posts an event to datadog (processing finished, potentially alerts, other issues)
        Think about this as a means to maintain persistence of alerts, rather than
        alerting itself.
        :param title: The title of the event
        :type title: str
        :param text: The body of the event (more information)
        :type text: str
        :param aggregation_key: Key that can be used to aggregate this event in a stream
        :type aggregation_key: str
        :param alert_type: The alert type for the event, one of
            ["error", "warning", "info", "success"]
        :type alert_type: str
        :param date_happened: POSIX timestamp of the event; defaults to now
        :type date_happened: int
        :param handle: User to post the event as; defaults to owner of the application key used
            to submit.
        :type handle: str
        :param priority: Priority to post the event as. ("normal" or "low", defaults to "normal")
        :type priority: str
        :param related_event_id: Post event as a child of the given event
        :type related_event_id: int
        :param tags: List of tags to apply to the event
        :type tags: list[str]
        :param device_name: device_name to post the event with
        :type device_name: list
        :return: the validated Datadog API response
        :rtype: dict
        """
        response = api.Event.create(
            title=title,
            text=text,
            aggregation_key=aggregation_key,
            alert_type=alert_type,
            date_happened=date_happened,
            handle=handle,
            priority=priority,
            related_event_id=related_event_id,
            tags=tags,
            host=self.host,
            device_name=device_name,
            source_type_name=self.source_type_name,
        )
        self.validate_response(response)
        return response
| {
"content_hash": "37c56ede0da647d8c4733b74a608bf60",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 106,
"avg_line_length": 38.57516339869281,
"alnum_prop": 0.6247034903422569,
"repo_name": "airbnb/airflow",
"id": "b4f7fac2f067fae1060aa07d7250513cb4a12dc3",
"size": "6690",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/providers/datadog/hooks/datadog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36374"
},
{
"name": "HTML",
"bytes": "99535"
},
{
"name": "JavaScript",
"bytes": "891618"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "796220"
},
{
"name": "Shell",
"bytes": "9040"
}
],
"symlink_target": ""
} |
import logging
import os
import fixtures
import shade
import testtools
import yaml
from nodepool import tests
from nodepool.provider_manager import shade_inner_exceptions
class TestShadeIntegration(tests.IntegrationTestCase):
    """Integration tests for nodepool's shade / os-client-config wiring."""
    def _cleanup_cloud_config(self):
        """Remove the temporary clouds.yaml written by _use_cloud_config()."""
        os.remove(self.clouds_path)
    def _use_cloud_config(self, config):
        """Write *config* to a temp clouds.yaml and monkey-patch
        os-client-config so that file is the only one consulted."""
        config_dir = fixtures.TempDir()
        self.useFixture(config_dir)
        self.clouds_path = os.path.join(config_dir.path, 'clouds.yaml')
        self.useFixture(fixtures.MonkeyPatch(
            'os_client_config.config.CONFIG_FILES',
            [self.clouds_path]))
        with open(self.clouds_path, 'w') as h:
            yaml.safe_dump(config, h)
        self.addCleanup(self._cleanup_cloud_config)
    def test_nodepool_provider_config(self):
        """Auth details given directly in the nodepool config must reach the
        shade client unchanged."""
        configfile = self.setup_config('integration.yaml')
        pool = self.useNodepool(configfile, watermark_sleep=1)
        pool.updateConfig()
        provider_manager = pool.config.provider_managers['real-provider']
        auth_data = {'username': 'real',
                     'project_id': 'real',
                     'password': 'real',
                     'auth_url': 'real'}
        self.assertEqual(provider_manager._client.auth, auth_data)
        self.assertEqual(provider_manager._client.region_name, 'real-region')
    def test_nodepool_osc_config(self):
        """Auth details resolved from clouds.yaml must reach the shade client."""
        configfile = self.setup_config('integration_osc.yaml')
        auth_data = {'username': 'os_real',
                     'project_name': 'os_real',
                     'password': 'os_real',
                     'auth_url': 'os_real'}
        osc_config = {'clouds': {'real-cloud': {'auth': auth_data}}}
        self._use_cloud_config(osc_config)
        pool = self.useNodepool(configfile, watermark_sleep=1)
        pool.updateConfig()
        provider_manager = pool.config.provider_managers['real-provider']
        self.assertEqual(provider_manager._client.auth, auth_data)
    def test_nodepool_osc_config_reload(self):
        """Rewriting clouds.yaml and calling updateConfig() must pick up the
        new credentials."""
        configfile = self.setup_config('integration_osc.yaml')
        auth_data = {'username': 'os_real',
                     'project_name': 'os_real',
                     'password': 'os_real',
                     'auth_url': 'os_real'}
        osc_config = {'clouds': {'real-cloud': {'auth': auth_data}}}
        self._use_cloud_config(osc_config)
        pool = self.useNodepool(configfile, watermark_sleep=1)
        pool.updateConfig()
        provider_manager = pool.config.provider_managers['real-provider']
        self.assertEqual(provider_manager._client.auth, auth_data)
        # update the config
        auth_data['password'] = 'os_new_real'
        os.remove(self.clouds_path)
        with open(self.clouds_path, 'w') as h:
            yaml.safe_dump(osc_config, h)
        pool.updateConfig()
        provider_manager = pool.config.provider_managers['real-provider']
        self.assertEqual(provider_manager._client.auth, auth_data)
    def test_exceptions(self):
        """shade_inner_exceptions() should log the inner exception's traceback
        while the outer shade exception propagates."""
        log = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
        with testtools.ExpectedException(shade.OpenStackCloudException):
            with shade_inner_exceptions():
                try:
                    raise Exception("inner test")
                # Was a bare `except:` (flake8 E722, also traps
                # KeyboardInterrupt/SystemExit); `Exception` is exactly what
                # the try block raises, so behavior here is unchanged.
                except Exception:
                    raise shade.OpenStackCloudException("outer test")
        self.assertTrue('Exception("inner test")' in log.output)
| {
"content_hash": "971c393e1040d7706f8cf72f2f5ba0f1",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 77,
"avg_line_length": 38.96590909090909,
"alnum_prop": 0.6112569262175561,
"repo_name": "seandst/nodepool",
"id": "b1430d20aca1914013f08a78923732924b093bd5",
"size": "4040",
"binary": false,
"copies": "2",
"ref": "refs/heads/pulp",
"path": "nodepool/tests/test_shade_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "374188"
},
{
"name": "Shell",
"bytes": "12644"
}
],
"symlink_target": ""
} |
'''
This module contains sciluigi's subclasses of luigi's Task class.
'''
import datetime
import luigi
import logging
import os
import sciluigi
import sciluigi.audit
import sciluigi.interface
import sciluigi.dependencies
import sciluigi.slurm
# Module-level logger used throughout the WorkflowTask machinery below.
log = logging.getLogger('sciluigi-interface')
# ==============================================================================
class WorkflowTask(sciluigi.audit.AuditTrailHelpers, luigi.Task):
    '''
    SciLuigi-specific task, that has a method for implementing a (dynamic) workflow
    definition (workflow()).
    '''
    instance_name = luigi.Parameter(default='sciluigi_workflow')
    # NOTE(review): these are class-level attributes, so their state (including
    # the mutable _tasks dict) is shared by every WorkflowTask subclass
    # instance in the process -- confirm this is intentional before running
    # more than one workflow per process.
    _tasks = {}
    _wfstart = ''
    _wflogpath = ''
    _hasloggedstart = False
    _hasloggedfinish = False
    _hasaddedhandler = False
    def _ensure_timestamp(self):
        '''
        Make sure that there is a time stamp for when the workflow started.
        '''
        if self._wfstart == '':
            self._wfstart = datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')
    def get_wflogpath(self):
        '''
        Get the path to the workflow-specific log file.
        Computed once (from class name + start timestamp) and cached.
        '''
        if self._wflogpath == '':
            self._ensure_timestamp()
            clsname = self.__class__.__name__.lower()
            logpath = 'log/workflow_' + clsname + '_started_{t}.log'.format(t=self._wfstart)
            self._wflogpath = logpath
        return self._wflogpath
    def get_auditdirpath(self):
        '''
        Get the path to the workflow-specific audit trail directory.
        '''
        self._ensure_timestamp()
        clsname = self.__class__.__name__.lower()
        audit_dirpath = 'audit/.audit_%s_%s' % (clsname, self._wfstart)
        return audit_dirpath
    def get_auditlogpath(self):
        '''
        Get the path to the workflow-specific audit trail file.
        '''
        self._ensure_timestamp()
        clsname = self.__class__.__name__.lower()
        audit_dirpath = 'audit/workflow_%s_started_%s.audit' % (clsname, self._wfstart)
        return audit_dirpath
    def add_auditinfo(self, infotype, infolog):
        '''
        Add audit information to the audit log.
        '''
        return self._add_auditinfo(self.__class__.__name__.lower(), infotype, infolog)
    def workflow(self):
        '''
        SciLuigi API method. Implement your workflow here, and return the last task(s)
        of the dependency graph.
        '''
        raise WorkflowNotImplementedException(
            'workflow() method is not implemented, for ' + str(self))
    def requires(self):
        '''
        Implementation of Luigi API method.
        On first call, attaches a file handler for the workflow log (to both
        this module's logger and luigi's) and logs the start banner; then
        delegates to workflow() for the dependency graph.
        '''
        if not self._hasaddedhandler:
            wflog_formatter = logging.Formatter(
                sciluigi.interface.LOGFMT_STREAM,
                sciluigi.interface.DATEFMT)
            wflog_file_handler = logging.FileHandler(self.output()['log'].path)
            wflog_file_handler.setLevel(logging.INFO)
            wflog_file_handler.setFormatter(wflog_formatter)
            log.addHandler(wflog_file_handler)
            luigilog = logging.getLogger('luigi-interface')
            luigilog.addHandler(wflog_file_handler)
            self._hasaddedhandler = True
        clsname = self.__class__.__name__
        if not self._hasloggedstart:
            log.info('-'*80)
            log.info('SciLuigi: %s Workflow Started (logging to %s)', clsname, self.get_wflogpath())
            log.info('-'*80)
            self._hasloggedstart = True
        workflow_output = self.workflow()
        if workflow_output is None:
            clsname = self.__class__.__name__
            raise Exception(('Nothing returned from workflow() method in the %s Workflow task. '
                    'Forgot to add a return statement at the end?') % clsname)
        return workflow_output
    def output(self):
        '''
        Implementation of Luigi API method.
        Returns the workflow log and audit trail targets.
        '''
        return {'log': luigi.LocalTarget(self.get_wflogpath()),
                'audit': luigi.LocalTarget(self.get_auditlogpath())}
    def run(self):
        '''
        Implementation of Luigi API method.
        Concatenates the per-task audit files into the workflow audit file
        and logs the finish banner. Raises if the audit file already exists.
        '''
        if self.output()['audit'].exists():
            errmsg = ('Audit file already exists, '
                      'when trying to create it: %s') % self.output()['audit'].path
            log.error(errmsg)
            raise Exception(errmsg)
        else:
            with self.output()['audit'].open('w') as auditfile:
                for taskname in sorted(self._tasks):
                    taskaudit_path = os.path.join(self.get_auditdirpath(), taskname)
                    if os.path.exists(taskaudit_path):
                        auditfile.write(open(taskaudit_path).read() + '\n')
        clsname = self.__class__.__name__
        if not self._hasloggedfinish:
            log.info('-'*80)
            log.info('SciLuigi: %s Workflow Finished (workflow log at %s)', clsname, self.get_wflogpath())
            log.info('-'*80)
            self._hasloggedfinish = True
    def new_task(self, instance_name, cls, **kwargs):
        '''
        Create new task instance, and link it to the current workflow.
        '''
        newtask = sciluigi.new_task(instance_name, cls, self, **kwargs)
        self._tasks[instance_name] = newtask
        return newtask
# ================================================================================
class WorkflowNotImplementedException(Exception):
    '''
    Raised when a WorkflowTask subclass has not overridden the workflow()
    SciLuigi API method.
    '''
| {
"content_hash": "72ddabb64a0c70de5a0ac89c1fa654f4",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 106,
"avg_line_length": 35.92258064516129,
"alnum_prop": 0.5678879310344828,
"repo_name": "pharmbio/sciluigi",
"id": "63f9738c8af0e5087d57fbfd3060d25f852beab2",
"size": "5568",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sciluigi/workflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39080"
},
{
"name": "Shell",
"bytes": "229"
}
],
"symlink_target": ""
} |
from typing import Any, List, Mapping, Optional, Tuple
from xml.etree.ElementTree import Element, SubElement
import markdown
from markdown.extensions import Extension
from zerver.lib.markdown import ResultWithFamily, walk_tree_with_family
class NestedCodeBlocksRenderer(Extension):
    """Markdown extension registering NestedCodeBlocksRendererTreeProcessor
    under the name "nested_code_blocks" at priority -500."""

    def extendMarkdown(self, md: markdown.Markdown) -> None:
        processor = NestedCodeBlocksRendererTreeProcessor(md, self.getConfigs())
        md.treeprocessors.register(processor, "nested_code_blocks", -500)
class NestedCodeBlocksRendererTreeProcessor(markdown.treeprocessors.Treeprocessor):
    """Tree processor that finds <code> tags which are the sole content of a
    list item's paragraph and re-renders them as codehilite-style blocks."""
    def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
        # config is accepted for API compatibility but not used.
        super().__init__(md)
    def run(self, root: Element) -> None:
        """Replace the parent <p> of each nested code block with a
        codehilite <div> in the grandparent <li>."""
        code_tags = walk_tree_with_family(root, self.get_code_tags)
        nested_code_blocks = self.get_nested_code_blocks(code_tags)
        for block in nested_code_blocks:
            tag, text = block.result
            codehilite_block = self.get_codehilite_block(text)
            self.replace_element(block.family.grandparent, codehilite_block, block.family.parent)
    def get_code_tags(self, e: Element) -> Optional[Tuple[str, Optional[str]]]:
        """Visitor for walk_tree_with_family: yield (tag, text) for <code>
        elements, None for everything else."""
        if e.tag == "code":
            return (e.tag, e.text)
        return None
    def get_nested_code_blocks(
        self,
        code_tags: List[ResultWithFamily[Tuple[str, Optional[str]]]],
    ) -> List[ResultWithFamily[Tuple[str, Optional[str]]]]:
        """Filter the collected <code> tags down to those nested as the only
        content of a <p> inside an <li>."""
        nested_code_blocks = []
        for code_tag in code_tags:
            parent: Any = code_tag.family.parent
            grandparent: Any = code_tag.family.grandparent
            if parent.tag == "p" and grandparent.tag == "li":
                # if the parent (<p>) has no text, and no children,
                # that means that the <code> element inside is its
                # only thing inside the bullet, we can confidently say
                # that this is a nested code block
                if (
                    parent.text is None
                    and len(list(parent)) == 1
                    and len(list(parent.itertext())) == 1
                ):
                    nested_code_blocks.append(code_tag)
        return nested_code_blocks
    def get_codehilite_block(self, code_block_text: Optional[str]) -> Element:
        """Build <div class="codehilite"><pre>text</pre></div> for the code."""
        div = Element("div")
        div.set("class", "codehilite")
        pre = SubElement(div, "pre")
        pre.text = code_block_text
        return div
    def replace_element(
        self,
        parent: Optional[Element],
        replacement: Element,
        element_to_replace: Element,
    ) -> None:
        """Swap element_to_replace for replacement in parent, preserving its
        position; no-op when parent is None."""
        if parent is None:
            return
        for index, child in enumerate(parent):
            if child is element_to_replace:
                parent.insert(index, replacement)
                parent.remove(element_to_replace)
def makeExtension(*args: Any, **kwargs: str) -> NestedCodeBlocksRenderer:
    """Standard Markdown extension entry point; *args is accepted for the
    Markdown API but not used."""
    return NestedCodeBlocksRenderer(**kwargs)
| {
"content_hash": "97ae438a6ea7c0190d00e2939862739b",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 97,
"avg_line_length": 37.04938271604938,
"alnum_prop": 0.6081306231256248,
"repo_name": "punchagan/zulip",
"id": "6492db8e432bec118a65f713c5c82ba09a77448d",
"size": "3001",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/lib/markdown/nested_code_blocks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "453615"
},
{
"name": "Dockerfile",
"bytes": "4898"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "607321"
},
{
"name": "Handlebars",
"bytes": "315160"
},
{
"name": "JavaScript",
"bytes": "3572990"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "94991"
},
{
"name": "Python",
"bytes": "8750579"
},
{
"name": "Ruby",
"bytes": "3875"
},
{
"name": "Shell",
"bytes": "134468"
},
{
"name": "TypeScript",
"bytes": "223296"
}
],
"symlink_target": ""
} |
import annoying.fields
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the UserProfile.allow_ads opt-out flag and set explicit
    on_delete/related_name metadata on the user one-to-one field."""
    dependencies = [
        ('core', '0003_add_banned_status'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='allow_ads',
            field=models.BooleanField(default=True, help_text='If unchecked, you will still see community ads.', verbose_name='See paid advertising'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='user',
            field=annoying.fields.AutoOneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL, verbose_name='User'),
        ),
    ]
| {
"content_hash": "2253b9ab79dd8287f71eccc56f4d2c10",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 171,
"avg_line_length": 33.541666666666664,
"alnum_prop": 0.6509316770186335,
"repo_name": "rtfd/readthedocs.org",
"id": "f5d6ae3d0295075c036f5fb1d41e693aa7e70dec",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/core/migrations/0004_ad-opt-out.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66552"
},
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "HTML",
"bytes": "196998"
},
{
"name": "JavaScript",
"bytes": "431128"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1821332"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
from FeatureGenerator import *
from TfidfFeatureGenerator import *
import pandas as pd
import numpy as np
from scipy.sparse import vstack
import cPickle
from sklearn.decomposition import TruncatedSVD
from helpers import *
class SvdFeatureGenerator(FeatureGenerator):
    """Computes truncated-SVD features from the headline/body TF-IDF vectors
    plus their cosine similarity, pickling each split to disk.
    NOTE: Python 2 code (print statements, cPickle, list-returning map).
    """
    def __init__(self, name='svdFeatureGenerator'):
        super(SvdFeatureGenerator, self).__init__(name)
    def process(self, df):
        """Fit TruncatedSVD on the stacked headline+body TF-IDF matrix and
        save per-split SVD features and headline-body cosine similarities."""
        # Rows with a non-null target are the training split; the rest are test.
        n_train = df[~df['target'].isnull()].shape[0]
        print 'SvdFeatureGenerator, n_train:',n_train
        n_test = df[df['target'].isnull()].shape[0]
        print 'SvdFeatureGenerator, n_test:',n_test
        # Reuse the TF-IDF features produced earlier by TfidfFeatureGenerator.
        tfidfGenerator = TfidfFeatureGenerator('tfidf')
        featuresTrain = tfidfGenerator.read('train')
        xHeadlineTfidfTrain, xBodyTfidfTrain = featuresTrain[0], featuresTrain[1]
        xHeadlineTfidf = xHeadlineTfidfTrain
        xBodyTfidf = xBodyTfidfTrain
        if n_test > 0:
            # test set is available
            featuresTest = tfidfGenerator.read('test')
            xHeadlineTfidfTest, xBodyTfidfTest = featuresTest[0], featuresTest[1]
            xHeadlineTfidf = vstack([xHeadlineTfidfTrain, xHeadlineTfidfTest])
            xBodyTfidf = vstack([xBodyTfidfTrain, xBodyTfidfTest])
        # compute the cosine similarity between truncated-svd features
        svd = TruncatedSVD(n_components=50, n_iter=15)
        xHBTfidf = vstack([xHeadlineTfidf, xBodyTfidf])
        svd.fit(xHBTfidf) # fit to the combined train-test set (or the full training set for cv process)
        print 'xHeadlineTfidf.shape:'
        print xHeadlineTfidf.shape
        xHeadlineSvd = svd.transform(xHeadlineTfidf)
        print 'xHeadlineSvd.shape:'
        print xHeadlineSvd.shape
        xHeadlineSvdTrain = xHeadlineSvd[:n_train, :]
        outfilename_hsvd_train = "train.headline.svd.pkl"
        with open(outfilename_hsvd_train, "wb") as outfile:
            cPickle.dump(xHeadlineSvdTrain, outfile, -1)
        print 'headline svd features of training set saved in %s' % outfilename_hsvd_train
        if n_test > 0:
            # test set is available
            xHeadlineSvdTest = xHeadlineSvd[n_train:, :]
            outfilename_hsvd_test = "test.headline.svd.pkl"
            with open(outfilename_hsvd_test, "wb") as outfile:
                cPickle.dump(xHeadlineSvdTest, outfile, -1)
            print 'headline svd features of test set saved in %s' % outfilename_hsvd_test
        xBodySvd = svd.transform(xBodyTfidf)
        print 'xBodySvd.shape:'
        print xBodySvd.shape
        xBodySvdTrain = xBodySvd[:n_train, :]
        outfilename_bsvd_train = "train.body.svd.pkl"
        with open(outfilename_bsvd_train, "wb") as outfile:
            cPickle.dump(xBodySvdTrain, outfile, -1)
        print 'body svd features of training set saved in %s' % outfilename_bsvd_train
        if n_test > 0:
            # test set is available
            xBodySvdTest = xBodySvd[n_train:, :]
            outfilename_bsvd_test = "test.body.svd.pkl"
            with open(outfilename_bsvd_test, "wb") as outfile:
                cPickle.dump(xBodySvdTest, outfile, -1)
            print 'body svd features of test set saved in %s' % outfilename_bsvd_test
        # Row-wise cosine similarity between headline and body SVD vectors,
        # reshaped to a column vector.
        simSvd = np.asarray(map(cosine_sim, xHeadlineSvd, xBodySvd))[:, np.newaxis]
        print 'simSvd.shape:'
        print simSvd.shape
        simSvdTrain = simSvd[:n_train]
        outfilename_simsvd_train = "train.sim.svd.pkl"
        with open(outfilename_simsvd_train, "wb") as outfile:
            cPickle.dump(simSvdTrain, outfile, -1)
        print 'svd sim. features of training set saved in %s' % outfilename_simsvd_train
        if n_test > 0:
            # test set is available
            simSvdTest = simSvd[n_train:]
            outfilename_simsvd_test = "test.sim.svd.pkl"
            with open(outfilename_simsvd_test, "wb") as outfile:
                cPickle.dump(simSvdTest, outfile, -1)
            print 'svd sim. features of test set saved in %s' % outfilename_simsvd_test
        return 1
    def read(self, header='train'):
        """Load the pickled SVD features for the given split ('train'/'test')
        and return [headline_svd, body_svd, sim_svd_column]."""
        filename_hsvd = "%s.headline.svd.pkl" % header
        with open(filename_hsvd, "rb") as infile:
            xHeadlineSvd = cPickle.load(infile)
        filename_bsvd = "%s.body.svd.pkl" % header
        with open(filename_bsvd, "rb") as infile:
            xBodySvd = cPickle.load(infile)
        filename_simsvd = "%s.sim.svd.pkl" % header
        with open(filename_simsvd, "rb") as infile:
            simSvd = cPickle.load(infile)
        print 'xHeadlineSvd.shape:'
        print xHeadlineSvd.shape
        #print type(xHeadlineSvd)
        print 'xBodySvd.shape:'
        print xBodySvd.shape
        #print type(xBodySvd)
        print 'simSvd.shape:'
        print simSvd.shape
        #print type(simSvd)
        return [xHeadlineSvd, xBodySvd, simSvd.reshape(-1, 1)]
        #return [simSvd.reshape(-1, 1)]
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| {
"content_hash": "79c7d7945b014e1365d4705a08bff558",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 104,
"avg_line_length": 40.201438848920866,
"alnum_prop": 0.6347530422333572,
"repo_name": "Cisco-Talos/fnc-1",
"id": "f2fe1d1faf56141f1549b6614625080e41c04948",
"size": "5588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tree_model/SvdFeatureGenerator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "103472"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from custom_field.custom_field import CustomFieldAdmin
from .models import Foo, Bar, Place, Waiter, Restaurant
@admin.register(Foo)
class FooAdmin(admin.ModelAdmin):
    # Plain ModelAdmin; no customization needed for Foo.
    pass
@admin.register(Bar)
class BarAdmin(CustomFieldAdmin, admin.ModelAdmin):
    # Mixes in CustomFieldAdmin so Bar's custom fields appear in the admin.
    pass
# Register the remaining demo models with the default ModelAdmin.
admin.site.register(Place)
admin.site.register(Waiter)
admin.site.register(Restaurant)
| {
"content_hash": "49cce4ced2c5db2616d8bb98bd5e57fc",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 21.27777777777778,
"alnum_prop": 0.7911227154046997,
"repo_name": "AbhiAgarwal/django-report-builder",
"id": "a51f3c18199ab3b246ea3fc7de7136c4119229b3",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "report_builder_demo/demo_models/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1799"
},
{
"name": "HTML",
"bytes": "27527"
},
{
"name": "JavaScript",
"bytes": "14634"
},
{
"name": "Python",
"bytes": "110452"
}
],
"symlink_target": ""
} |
import mock
from airflow.providers.google.suite.operators.gcs_to_sheets import GCSToGoogleSheetsOperator
GCP_CONN_ID = "test"
SPREADSHEET_ID = "1234567890"
VALUES = [[1, 2, 3]]
BUCKET = "destination_bucket"
PATH = "path/to/reports"
class TestGCSToGoogleSheets:
    """Unit test for GCSToGoogleSheetsOperator.execute with all collaborators
    mocked out."""
    @mock.patch("airflow.providers.google.suite.operators.gcs_to_sheets.GCSHook")
    @mock.patch("airflow.providers.google.suite.operators.gcs_to_sheets.GSheetsHook")
    @mock.patch("airflow.providers.google.suite.operators.gcs_to_sheets.NamedTemporaryFile")
    @mock.patch("airflow.providers.google.suite.operators.gcs_to_sheets.csv.reader")
    def test_execute(self, mock_reader, mock_tempfile, mock_sheet_hook, mock_gcs_hook):
        """The operator should download the GCS object into a temp file, parse
        it as CSV, and push the rows into range "Sheet1" of the spreadsheet."""
        # Decorators apply bottom-up, so the mock arguments arrive in reverse
        # order: csv.reader, NamedTemporaryFile, GSheetsHook, GCSHook.
        filename = "file://97g23r"
        file_handle = mock.MagicMock()
        mock_tempfile.return_value.__enter__.return_value = file_handle
        mock_tempfile.return_value.__enter__.return_value.name = filename
        mock_reader.return_value = VALUES
        op = GCSToGoogleSheetsOperator(
            task_id="test_task",
            spreadsheet_id=SPREADSHEET_ID,
            bucket_name=BUCKET,
            object_name=PATH,
            gcp_conn_id=GCP_CONN_ID,
        )
        op.execute(None)
        # Both hooks are constructed with the same connection id.
        mock_sheet_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID, delegate_to=None
        )
        mock_gcs_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, delegate_to=None)
        mock_gcs_hook.return_value.download.assert_called_once_with(
            bucket_name=BUCKET, object_name=PATH, filename=filename
        )
        mock_reader.assert_called_once_with(file_handle)
        mock_sheet_hook.return_value.update_values.assert_called_once_with(
            spreadsheet_id=SPREADSHEET_ID,
            range_="Sheet1",
            values=VALUES,
        )
| {
"content_hash": "a4eec1b93498a1220880f140338d011e",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 92,
"avg_line_length": 37.645833333333336,
"alnum_prop": 0.6646375207526287,
"repo_name": "wooga/airflow",
"id": "dda4a978957c0e1e011767536158b54c4a6c6f06",
"size": "2593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/google/suite/operators/test_gcs_to_sheets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
} |
import datetime
import typing
import attr
from tracki.src.domain import exceptions
from tracki.src.domain.entities import category as category_entity
@attr.s
class Shift:
    """A tracked work shift belonging to a category.

    A shift is created unstarted; start() stamps start_time and end() stamps
    end_time, with each transition guarded against being applied twice or out
    of order via the domain exceptions.
    """

    category: category_entity.Category = attr.ib()
    start_time: typing.Optional[datetime.datetime] = attr.ib(default=None)
    end_time: typing.Optional[datetime.datetime] = attr.ib(default=None)

    def start(self) -> None:
        """Record the current time as the shift start."""
        if self.start_time is not None:
            raise exceptions.ShiftAlreadyStartedException
        self.start_time = datetime.datetime.now()

    def end(self) -> None:
        """Record the current time as the shift end."""
        if self.start_time is None:
            raise exceptions.ShiftNotYetStartedException
        if self.end_time is not None:
            raise exceptions.ShiftAlreadyEndedException
        self.end_time = datetime.datetime.now()

    @property
    def is_running(self) -> bool:
        """True while the shift has started but not yet ended."""
        return self.start_time is not None and self.end_time is None
| {
"content_hash": "6e09b3f9208dd4cd68b3f7d76347daf7",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 74,
"avg_line_length": 30.033333333333335,
"alnum_prop": 0.6936736958934517,
"repo_name": "rok-povsic/Tracki",
"id": "c7369faf1ff70c33ff5960ee130974d62ca32a13",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracki/src/domain/entities/shift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "204"
},
{
"name": "C#",
"bytes": "33486"
}
],
"symlink_target": ""
} |
from base import *
from bibserver.importer import Importer
import bibserver.dao
import os
class TestImporter:
    """Integration-style tests for bibserver.importer.Importer.upload()."""
    @classmethod
    def setup_class(cls):
        pass
    @classmethod
    def teardown_class(cls):
        """Drop the test index once all tests in the class have run."""
        conn, db = dao.get_conn()
        conn.delete_index(TESTDB)
    def test_upload(self):
        """Uploading the same bibjson twice must be idempotent: one
        collection, one record."""
        owner = dao.Account(id='testaccount1')
        owner.save()
        i = Importer(owner=owner)
        data = open('test/data/sample.bibtex.bibjson')
        collection_in = {
            'label': u'My Test Collection'
        }
        coll, records = i.upload(data, collection_in)
        assert coll.id
        assert owner.collections[0].id == coll.id, owner.collections
        assert len(records) == 1, records
        recid = records[0]['_id']
        out = bibserver.dao.Record.get(recid)
        assert out["year"] == '2008', out
        assert out['collection'] == coll['collection']
        # now try uploading exactly the same data again
        data = open('test/data/sample.bibtex.bibjson')
        newcoll, records = i.upload(data, collection_in)
        # still should have only one collection
        assert len(owner.collections) == 1
        assert newcoll.id == coll.id
        assert len(records) == 1
        assert records[0]['collection'] == coll['collection']
        # still should have only one record in it
        recs_for_collection = dao.Record.query('collection:"' + coll['collection'] + '"')
        assert recs_for_collection['hits']['total'] == 1, recs_for_collection
| {
"content_hash": "e24a5f0bfae753a99d4018d7fc073457",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 89,
"avg_line_length": 33.21739130434783,
"alnum_prop": 0.6073298429319371,
"repo_name": "flowsta/bibserver",
"id": "7b17ffd85edc2ba1f00cd7da1e828d80909cc058",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_importer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "421"
},
{
"name": "HTML",
"bytes": "47207"
},
{
"name": "JavaScript",
"bytes": "9154"
},
{
"name": "Perl",
"bytes": "22029"
},
{
"name": "Python",
"bytes": "202940"
}
],
"symlink_target": ""
} |
import tweepy, psycopg2, os, json, datetime,sys
# NOTE(review): log file handle opened at import time and not closed in this
# section -- confirm whether later code in the file uses/closes it.
f = open("sosagua.txt", "a")
# connection string for the database
# dev string
# SECURITY NOTE: hard-coded database credentials in source; load them from
# environment variables or a config file instead.
conn_string = "host='localhost' dbname='sosagua' user='postgres' password='Guatemala1'"
# production string
#conn_string = "host='localhost' dbname='sosagua' user='postgres' password='postgres2020!Incyt'"
'''
HERLICH STEVEN GONZALEZ ZAMBRANO 2020 --> EN CUARENTENA MUNDIAL
API key:
bCgXMvHfVr1f86jrcwJSIbfyU
API secret key:
UUYjGwM63n6UvZMPTmfZG0yUq5eDm2PE5747F0xht71pgr2g8v
access token
1432847989-W3qw9szAWWP0VxsPpEsvVZX6igJjrVJzUZrrgYY
access token secret
332CPmsifvklzEK33F99flSAde5zz71fCiaz4V1P6qYIs
'''
# These values are issued when enabling the Twitter API for the account.
# SECURITY NOTE: hard-coded Twitter credentials; rotate them and load from
# the environment rather than committing them to source control.
cfg = {
    "consumer_key"        : "bCgXMvHfVr1f86jrcwJSIbfyU",
    "consumer_secret"     : "UUYjGwM63n6UvZMPTmfZG0yUq5eDm2PE5747F0xht71pgr2g8v",
    "access_token"        : "1432847989-W3qw9szAWWP0VxsPpEsvVZX6igJjrVJzUZrrgYY",
    "access_token_secret" : "332CPmsifvklzEK33F99flSAde5zz71fCiaz4V1P6qYIs"
    }
#****************************************FASE 1 *******************************************
'''
update pg_database set encoding=8 where datname='sosagua';
update pg_database set encoding = pg_char_to_encoding('UTF8') where datname = 'sosagua'
create table public.fase1(
id SERIAL PRIMARY KEY,
fecha timestamp without time zone DEFAULT now(),
twitjson json not null ,
twitstring text not null ,
origen text null,
municipio numeric null default 0,
necesidad numeric null default 1
)
create table cubo1(
municipio numeric not null,
necesidad numeric not null,
mes text not null,
ano text not null,
contador numeric not null
)
'''
def insertaTwitt(tjson, tstr):
    """Insert one tweet into the fase1 staging table.

    :param tjson: dict/serializable payload stored in the json column
    :param tstr: raw tweet object; stored stringified with ' swapped for "
                 (kept for compatibility with rows written by older runs)

    Fixes vs. original: the INSERT is now parameterized (the old version
    built SQL by string concatenation, so any quote in the tweet text could
    break or inject into the statement), and the connection is always closed
    even if execute/commit raises.
    """
    conn = psycopg2.connect(conn_string)
    try:
        cursor = conn.cursor()
        print(tjson)  # debug trace of the stored payload
        cursor.execute(
            "insert into fase1 (twitjson,twitstring,origen) values (%s, %s, %s)",
            (json.dumps(tjson), str(tstr).replace("'", '"'), 'Twitter'))
        conn.commit()
    finally:
        conn.close()
#instalar el paquete de la siguiente forma: pip install tweepi
def get_api(cfg):
    """Build an authenticated tweepy API client from the credentials dict."""
    handler = tweepy.OAuthHandler(cfg['consumer_key'], cfg['consumer_secret'])
    handler.set_access_token(cfg['access_token'], cfg['access_token_secret'])
    return tweepy.API(handler)
def sendTwitt():
    """Post a fixed hello-world status to the authenticated account.

    Network side effect only; the returned status object is unused.
    """
    api = get_api(cfg)
    tweet = "Hello, world! msg from an py app"
    status = api.update_status(status=tweet)
class msg:
    """Plain value object mirroring the tweet fields of interest.

    NOTE(review): currently unused — the only construction site is the
    commented-out line inside getTweets.
    """
    def __init__(self, created_at, id, id_str, text, entities, metadata, source, user):
        # Straight field-for-field copy of the tweepy status attributes.
        self.created_at = created_at
        self.id = id
        self.id_str = id_str
        self.text = text
        self.entities = entities
        self.metadata = metadata
        self.source = source
        self.user = user
def getTweets(search_words, date_since, number):
    """Search Twitter for up to `number` Spanish-language tweets matching
    `search_words` posted since `date_since`, persisting each via insertaTwitt.
    """
    api = get_api(cfg)
    cursor = tweepy.Cursor(api.search,
                           q=search_words,
                           lang="es",
                           since=date_since)
    for tweet in cursor.items(number):
        # Flatten the fields we care about into a plain dict.
        mined = {
            "id": tweet.id,
            "name": tweet.user.name,
            "screen_name": tweet.user.screen_name,
            "retweet_count": tweet.retweet_count,
            "text": convUTF8(tweet.text),
            "location": convUTF8(tweet.user.location),
            "coordinates": str(tweet.coordinates),
            "geo_enabled": str(tweet.user.geo_enabled),
            "geo": str(tweet.geo),
            "created_at": str(tweet.created_at),
            "favorite_count": tweet.favorite_count,
            "hashtags": tweet.entities['hashtags'],
            "status_count": tweet.user.statuses_count,
            "place": convUTF8(tweet.place),
            "source": tweet.source
        }
        # Single quotes are swapped for double quotes so the stringified dict
        # survives the SQL insert downstream.
        insertaTwitt(str(mined).replace("'", '"'), tweet)
def getProcessDate():
    """Return yesterday's date (the day whose tweets are being processed)."""
    return datetime.date.today() - datetime.timedelta(days=1)
def convUTF8(cadena):
    """Return str(cadena) with Spanish accented characters ASCII-folded.

    Maps a/e/i/o/u with acute accents and n-with-tilde to their plain ASCII
    equivalents, in both cases. Fixes the original bug where uppercase 'Ñ'
    was "replaced" with itself instead of 'N'. On any conversion failure the
    input is returned unchanged (best-effort, as before).
    """
    try:
        # One C-level pass instead of twelve chained .replace() calls.
        return str(cadena).translate(
            str.maketrans("áéíóúñÁÉÍÓÚÑ", "aeiounAEIOUN"))
    except Exception:
        return cadena
#****************************FASE 2******************************************
'''
create table public.necesidad(
id SERIAL PRIMARY KEY,
descripcion text not null
);
create table public.sinonimos(
necesidad numeric,
sinonimo text not null unique
);
'''
def write(cadena):
    """Append cadena (stringified) as one line to the module-level log file f."""
    line = str(cadena) + '\n'
    f.write(line)
def getLocation():
    """Return every row of public.municipios as a pretty-printed JSON string.

    Uses RealDictCursor so each row serializes as a column-name -> value dict.
    """
    from psycopg2.extras import RealDictCursor
    connection = psycopg2.connect(conn_string)
    cur = connection.cursor(cursor_factory=RealDictCursor)
    cur.execute("select cast(id as text), pais,departamen_1,municipi_1,cast(point_x as text),cast(point_y as text) from public.municipios")
    payload = json.dumps(cur.fetchall(), indent = 2)
    connection.close()
    return payload
def ejecutaComandoPsql(query):
    """Execute a single SQL statement against the sosagua database and commit."""
    print(query)  # trace the statement being run
    connection = psycopg2.connect(conn_string)
    cur = connection.cursor()
    cur.execute(query)
    connection.commit()
    connection.close()
'''
def fase2(fecha):
#query = "insert into public.fase2 ( municipios , fase1 ) select distinct m1.id,fase1.id from municipios m1, municipios m2, fase1 where m1.id = m2.id and fase1.twitstring like '%' || m1.departamen_1 || '%' and fase1.twitstring like '%' || m2.municipi_1 || '%' and fase1.fecha > '" + fecha + " 00:00:00' "
print(query)
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
cursor.execute(query)
conn.commit()
conn.close()
print("terminada fase 2")
'''
#ADD HERE NEW HASHTAGS
hashtags = ["#AGUAGT", "#SOSAGUAGT", "#SINAGUA"]
#hashtags = ["#TRANSITOGT"]
nTwits = 5000
if __name__ == "__main__":
    # Pipeline: (1.0) optionally pull yesterday's tweets per hashtag,
    # (1.2) tag rows with a municipio by substring match, (1.3) rebuild cubo1.
    write("*************************************************************")
    fecha = getProcessDate()
    print(fecha)
    write(fecha)
    print("FASE 1.0 --> CONECTANDO A TWITTER PARA EXTRAER TWITS DEL DIA")
    for x in hashtags:
        print(x)
        write(x)
        # getTweets(x,str(fecha),nTwits)
    print('FASE 1.2 --> AGREGANDO COORDENADAS DE MUNICIPIOS AL QUERY')
    write('FASE 1.2 --> AGREGANDO COORDENADAS DE MUNICIPIOS AL QUERY')
    #query = "update fase1 set municipio = m1.id from municipios m1, municipios m2 where m1.id = m2.id and lower(fase1.twitstring) like '%' || lower(m1.departamen_1) || '%' and lower(fase1.twitstring) like '%' || lower(m2.municipi_1) || '%' and fase1.fecha > '" + str(fecha) + " 00:00:00' "
    # NOTE(review): SQL below is built by concatenating str(fecha); fecha is a
    # locally computed date (not user input), so injection risk is low, but a
    # parameterized query would still be safer.
    query = "update fase1 set municipio = m1.id from municipios m1 where lower(fase1.twitstring) like '%' || lower(m1.departamen_1) || '%' and fase1.municipio = 0 and fase1.fecha > '" + str(fecha) + " 00:00:00' "
    ejecutaComandoPsql(query)
    query = "update fase1 set municipio = m1.id from municipios m1 where lower(fase1.twitstring) like '%' || lower(m1.municipi_1) || '%' and fase1.municipio = 0 and fase1.fecha > '" + str(fecha) + " 00:00:00' "
    ejecutaComandoPsql(query)
    print('FASE 1.3 --> BUSCANDO PALABRAS CLAVE PARA CLASIFICACION --> CREANDO CUBO 1')
    write('FASE 1.3 --> BUSCANDO PALABRAS CLAVE PARA CLASIFICACION --> CREANDO CUBO 1')
    # Normalize nulls so the GROUP BY below has stable keys.
    query = "update fase1 set municipio = 0 where municipio is null"
    ejecutaComandoPsql(query)
    #TODO QUERY NECESIDAD
    query = "update fase1 set necesidad = 0 where necesidad is null"
    ejecutaComandoPsql(query)
    query = "delete from cubo1"#TODO where current month and current year
    ejecutaComandoPsql(query)
    query = "insert into cubo1 (municipio,necesidad,mes,ano,contador) select municipio, necesidad, extract(MONTH from FECHA),extract (YEAR from FECHA), count(*) from fase1 group by municipio, necesidad, extract(MONTH from FECHA), extract(YEAR from FECHA)"
    ejecutaComandoPsql(query)#TODO where current month and current year
    print("proceso terminado")
    write("proceso terminado")
    f.close()
| {
"content_hash": "0e8d54356be17fb7b05a96aca6dc89db",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 312,
"avg_line_length": 37.16260162601626,
"alnum_prop": 0.6168234521986437,
"repo_name": "Locottus/Python",
"id": "83c2821374b4867092ec951730f03bf92bf7ec33",
"size": "9155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweeter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "313"
},
{
"name": "HTML",
"bytes": "2214"
},
{
"name": "JavaScript",
"bytes": "671"
},
{
"name": "Python",
"bytes": "210134"
},
{
"name": "Shell",
"bytes": "341"
}
],
"symlink_target": ""
} |
# NOTE(review): appears to be a MyRobotLab service script — `runtime` and
# `python` are presumably injected by the host framework; confirm before
# running standalone.
google = runtime.start('google','GoogleSearch')
# Route the service's 'publishResults' events to this script's python service.
python.subscribe('google', 'publishResults')
def onResults(data):
    # Callback receiving published search results; just prints them.
    print(data)
google.search('what is a goat?')
| {
"content_hash": "dc0b81f08993edc48d5506a536a8d222",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.7041420118343196,
"repo_name": "MyRobotLab/myrobotlab",
"id": "eec89b5151ac0a9d2d422ae01d03457d7d127c28",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/main/resources/resource/GoogleSearch/GoogleSearch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1542"
},
{
"name": "C",
"bytes": "6677"
},
{
"name": "C++",
"bytes": "274868"
},
{
"name": "CSS",
"bytes": "83744"
},
{
"name": "GLSL",
"bytes": "757"
},
{
"name": "HTML",
"bytes": "374401"
},
{
"name": "Java",
"bytes": "7100082"
},
{
"name": "JavaScript",
"bytes": "1536187"
},
{
"name": "Propeller Spin",
"bytes": "14406"
},
{
"name": "Python",
"bytes": "191671"
},
{
"name": "Shell",
"bytes": "3547"
}
],
"symlink_target": ""
} |
from oslo_log import log
import six
import webob
from webob import exc
from manila.api.openstack import wsgi
from manila.common import constants
from manila import exception
from manila.i18n import _, _LI
from manila import share
LOG = log.getLogger(__name__)
class ShareUnmanageMixin(object):
    """Shared implementation of the share 'unmanage' API action."""
    @wsgi.Controller.authorize("unmanage")
    def _unmanage(self, req, id, body=None):
        """Unmanage a share.

        Rejects with HTTP 403 when the share lives on a share server, is in
        a transitional state, or still has dependent snapshots; maps
        NotFound to 404 and InvalidShare/PolicyNotAuthorized to 403.
        Returns HTTP 202 on success.
        """
        context = req.environ['manila.context']
        LOG.info(_LI("Unmanage share with id: %s"), id, context=context)
        try:
            share = self.share_api.get(context, id)
            if share.get('share_server_id'):
                msg = _("Operation 'unmanage' is not supported for shares "
                        "that are created on top of share servers "
                        "(created with share-networks).")
                raise exc.HTTPForbidden(explanation=msg)
            elif share['status'] in constants.TRANSITIONAL_STATUSES:
                msg = _("Share with transitional state can not be unmanaged. "
                        "Share '%(s_id)s' is in '%(state)s' state.") % dict(
                        state=share['status'], s_id=share['id'])
                raise exc.HTTPForbidden(explanation=msg)
            snapshots = self.share_api.db.share_snapshot_get_all_for_share(
                context, id)
            if snapshots:
                msg = _("Share '%(s_id)s' can not be unmanaged because it has "
                        "'%(amount)s' dependent snapshot(s).") % {
                    's_id': id, 'amount': len(snapshots)}
                raise exc.HTTPForbidden(explanation=msg)
            self.share_api.unmanage(context, share)
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=six.text_type(e))
        except (exception.InvalidShare, exception.PolicyNotAuthorized) as e:
            raise exc.HTTPForbidden(explanation=six.text_type(e))
        return webob.Response(status_int=202)
class ShareUnmanageController(ShareUnmanageMixin, wsgi.Controller):
    """The Unmanage API controller for the OpenStack API."""
    resource_name = "share"
    def __init__(self, *args, **kwargs):
        # Name the class explicitly: the original `super(self.__class__, ...)`
        # breaks under further subclassing (infinite recursion), because
        # self.__class__ is always the most-derived type.
        super(ShareUnmanageController, self).__init__(*args, **kwargs)
        self.share_api = share.API()
    @wsgi.Controller.api_version('1.0', '2.6')
    def unmanage(self, req, id):
        """Unmanage the share with the given id (API versions 1.0-2.6)."""
        return self._unmanage(req, id)
def create_resource():
    """Build the WSGI resource wrapping the unmanage controller."""
    controller = ShareUnmanageController()
    return wsgi.Resource(controller)
| {
"content_hash": "488c027a57c7fa11abc524190638e4af",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 37.43283582089552,
"alnum_prop": 0.6000797448165869,
"repo_name": "scality/manila",
"id": "af866c6f5445bf661f577afe2e4288f581672a54",
"size": "3106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/api/v1/share_unmanage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "5912966"
},
{
"name": "Shell",
"bytes": "46081"
}
],
"symlink_target": ""
} |
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import time
#
# INFO: Your Parameters.
# You can adjust them after completing the lab
C = 1                # presumably the SVC regularization strength — see SVC() below
kernel = 'linear'
iterations = 5000    # TODO: Change to 200000 once you get to Question#2
#
# INFO: You can set this to false if you want to
# draw the full square matrix
FAST_DRAW = True
def drawPlots(model, X_train, X_test, y_train, y_test, wintitle='Figure 1'):
  """Draw a pairwise feature-vs-feature grid of 2D decision boundaries.

  For each feature pair, refits `model` on just those two columns, plots the
  contour of its predictions plus the training points, and annotates each
  subplot with that 2D test score. Prints the best 2D score seen.

  NOTE(review): Python 2 code (print statements) using pandas' long-removed
  `.ix` indexer — this will not run on modern Python/pandas.
  """
  # INFO: A convenience function for you
  # You can use this to break any higher-dimensional space down
  # And view cross sections of it.
  # If this line throws an error, use plt.style.use('ggplot') instead
  mpl.style.use('ggplot') # Look Pretty
  padding = 3
  resolution = 0.5
  max_2d_score = 0
  y_colors = ['#ff0000', '#00ff00', '#0000ff']
  my_cmap = mpl.colors.ListedColormap(['#ffaaaa', '#aaffaa', '#aaaaff'])
  colors = [y_colors[i] for i in y_train]
  num_columns = len(X_train.columns)
  fig = plt.figure()
  fig.canvas.set_window_title(wintitle)
  cnt = 0
  for col in range(num_columns):
    for row in range(num_columns):
      # Easy out: with FAST_DRAW only the lower triangle is rendered.
      if FAST_DRAW and col > row:
        cnt += 1
        continue
      ax = plt.subplot(num_columns, num_columns, cnt + 1)
      plt.xticks(())
      plt.yticks(())
      # Intersection: diagonal cells just show the feature name.
      if col == row:
        plt.text(0.5, 0.5, X_train.columns[row], verticalalignment='center', horizontalalignment='center', fontsize=12)
        cnt += 1
        continue
      # Only select two features to display, then train the model
      X_train_bag = X_train.ix[:, [row,col]]
      X_test_bag = X_test.ix[:, [row,col]]
      model.fit(X_train_bag, y_train)
      # Create a mesh to plot in
      x_min, x_max = X_train_bag.ix[:, 0].min() - padding, X_train_bag.ix[:, 0].max() + padding
      y_min, y_max = X_train_bag.ix[:, 1].min() - padding, X_train_bag.ix[:, 1].max() + padding
      xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
                           np.arange(y_min, y_max, resolution))
      # Plot Boundaries
      plt.xlim(xx.min(), xx.max())
      plt.ylim(yy.min(), yy.max())
      # Prepare the contour: predict every mesh point, shade by class.
      Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
      Z = Z.reshape(xx.shape)
      plt.contourf(xx, yy, Z, cmap=my_cmap, alpha=0.8)
      plt.scatter(X_train_bag.ix[:, 0], X_train_bag.ix[:, 1], c=colors, alpha=0.5)
      score = round(model.score(X_test_bag, y_test) * 100, 3)
      plt.text(0.5, 0, "Score: {0}".format(score), transform = ax.transAxes, horizontalalignment='center', fontsize=8)
      max_2d_score = score if score > max_2d_score else max_2d_score
      cnt += 1
  print "Max 2D Score: ", max_2d_score
  fig.set_tight_layout(True)
def benchmark(model, X_train, X_test, y_train, y_test, wintitle='Figure 1'):
  """Time `iterations` repeated fit() calls, then `iterations` repeated
  score() calls, printing wall-clock totals and the final test score.

  NOTE(review): Python 2 print statements — will not run on Python 3.
  """
  print '\n\n' + wintitle + ' Results'
  s = time.time()
  for i in range(iterations):
    #
    # TODO: train the classifier on the training data / labels:
    #
    model.fit(X_train, y_train)
  print "{0} Iterations Training Time: ".format(iterations), time.time() - s
  s = time.time()
  for i in range(iterations):
    #
    # TODO: score the classifier on the testing data / labels:
    #
    score = model.score(X_test, y_test)
  print "{0} Iterations Scoring Time: ".format(iterations), time.time() - s
  print "High-Dimensionality Score: ", round((score*100), 3)
#
# TODO: Load up the wheat dataset into dataframe 'X'
# Verify you did it properly.
# Indices shouldn't be doubled, nor weird headers...
X= pd.read_csv('Datasets/wheat.data', index_col = 0, header=0)
# INFO: An easy way to show which rows have nans in them
print X[pd.isnull(X).any(axis=1)]
#
# TODO: Go ahead and drop any row with a nan
#
X = X.dropna()
#
# INFO: # In the future, you might try setting the nan values to the
# mean value of that column, the mean should only be calculated for
# the specific class rather than across all classes, now that you
# have the labels
#
# TODO: Copy the labels out of the dset into variable 'y' then Remove
# them from X. Encode the labels, using the .map() trick we showed
# you in Module 5 -- canadian:0, kama:1, and rosa:2
#
y = X['wheat_type'].map({'canadian' : 0, 'kama' : 1, 'rosa' : 2})
del X['wheat_type']
#
# TODO: Split your data into test / train sets
# Your test size can be 30% with random_state 7.
# Use variable names: X_train, X_test, y_train, y_test
#
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 7)
#
# TODO: Create an SVC classifier named svc
# Use a linear kernel, and set the C value to C
#
# NOTE(review): the lab says to set C=C, but the call below leaves C at its
# sklearn default — presumably intentional for this checkpoint; confirm.
from sklearn.svm import SVC
svc = SVC(kernel='linear')
#
# TODO: Create an KNeighbors classifier named knn
# Set the neighbor count to 5
#
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
# decision tree
from sklearn import tree
dtree = tree.DecisionTreeClassifier(max_depth=2, random_state=2)
# Benchmark and visualize each of the three classifiers on the same split.
benchmark(dtree, X_train, X_test, y_train, y_test, 'DTree')
drawPlots(dtree, X_train, X_test, y_train, y_test, 'DTree')
benchmark(knn, X_train, X_test, y_train, y_test, 'KNeighbors')
drawPlots(knn, X_train, X_test, y_train, y_test, 'KNeighbors')
benchmark(svc, X_train, X_test, y_train, y_test, 'SVC')
drawPlots(svc, X_train, X_test, y_train, y_test, 'SVC')
plt.show()
#
# BONUS: After submitting your answers, toy around with
# gamma, kernel, and C.
| {
"content_hash": "be0c5f76ea2624c5d7b0b7f07a9251fd",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 119,
"avg_line_length": 28.103092783505154,
"alnum_prop": 0.6513206162876009,
"repo_name": "mr3bn/DAT210x",
"id": "5228d1a6deb5b9b4e08b4e4d5643a356fc0b474a",
"size": "5452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Module6/assignment4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132926"
}
],
"symlink_target": ""
} |
import pytest
from pyproj import CRS, Proj
from pyproj.exceptions import CRSError, ProjError
def test_proj_exception():
    """An unknown projection name should surface ProjError from Proj()."""
    bad_definition = "+proj=bobbyjoe"
    with pytest.raises(ProjError, match="Internal Proj Error"):
        Proj(bad_definition)
def test_crs_exception():
    """An unknown projection name should surface CRSError from CRS parsing."""
    bad_definition = "+proj=bobbyjoe"
    with pytest.raises(CRSError, match="Internal Proj Error"):
        CRS(bad_definition)
| {
"content_hash": "bb094f7b2949993c2393e0d671bd8065",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 63,
"avg_line_length": 24.214285714285715,
"alnum_prop": 0.7109144542772862,
"repo_name": "ocefpaf/pyproj",
"id": "9969a04ecf7b728d3fa1e39ac13d539454d45e32",
"size": "339",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "test/test_exception_logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "194556"
},
{
"name": "Makefile",
"bytes": "2671"
},
{
"name": "Python",
"bytes": "649387"
},
{
"name": "Shell",
"bytes": "10347"
}
],
"symlink_target": ""
} |
from .base import BaseElement
from ..utils import html_property
class Title(BaseElement):
    """
    Heading widget, rendered as an HTML <h{n}> element.
    """
    def build(self, text, size=1):
        """
        :param text: Text of the title
        :param size: Heading level (a higher size yields a smaller title)
        """
        super(Title, self).build()
        self.size = size
        self.content = text
    def _get_html_tag(self):
        # Tag name depends on the configured heading level, e.g. "h1".
        return "h{}".format(self.size)
class Paragraph(BaseElement):
    """
    Block of plain text, rendered as an HTML <p> element.
    """
    html_tag = "p"
    def build(self, text):
        """
        :param text: Text content of the paragraph
        """
        super(Paragraph, self).build()
        self.content = text
class Span(BaseElement):
    """
    Inline run of text, rendered as an HTML <span> element.
    """
    html_tag = "span"
    def build(self, text):
        """
        :param text: Text content of the span
        """
        super(Span, self).build()
        self.content = text
class TextLink(BaseElement):
    """
    Clickable text pointing at an external URL (HTML <a> element).
    """
    html_tag = "a"
    target = html_property('href')
    """
    Target of the link
    """
    def build(self, text, url):
        """
        :param text: Text of the link
        :param url: Target URL
        """
        super(TextLink, self).build()
        self.content = text
        self.target = url
| {
"content_hash": "0348f9533039e61e027b0dae28bb01fb",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 67,
"avg_line_length": 19.123287671232877,
"alnum_prop": 0.5401146131805158,
"repo_name": "Dalloriam/engel",
"id": "56a0f8a03e92ee7f1ea617c6527538cfd732b558",
"size": "1396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engel/widgets/text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "255"
},
{
"name": "JavaScript",
"bytes": "4379"
},
{
"name": "Python",
"bytes": "70967"
}
],
"symlink_target": ""
} |
from util import *
import json
class ChromeTabFinder(object):
def __init__(self, host, port):
self._next_seq = 0
self._timer = Timer(250)
self._timer.tick.add_listener(self._tick)
self._host = host
self._port = port
self._session = None
self._get_tab_list_pending = False
self._tick()
def _tick(self):
if not self._session:
self._try_connect()
elif not self._get_tab_list_pending:
self._begin_get_tab_list()
def _try_connect(self):
try:
s = socket.socket()
s.connect((self._host, self._port))
ChromeTabFinder.do_handshake(s)
self._session = AsyncHTTPSession(s)
except:
self._session = None
log2("Could not connect to chrome on %s:%s", self._host, self._port)
if self._session:
self._session.closed.add_listener(self._on_session_closed)
def _on_session_closed(self):
assert self._session
self._session = None
@property
def chrome_found(self):
return self._session != None
@staticmethod
def do_handshake(s):
i = "ChromeDevToolsHandshake"
handshake = "ChromeDevToolsHandshake\r\n"
remaining = handshake
while len(remaining):
sent = s.send(handshake)
remaining = remaining[sent:]
handshake_ack = s.recv(len(handshake))
if handshake_ack != handshake:
raise Exception('handshake failed')
else:
log1("handshake succeeded")
def _begin_get_tab_list(self):
self._get_tab_list_pending = True
self._session.request({"Tool":"DevToolsService"}, json.dumps({"command" : "list_tabs"}), self._finish_get_tab_list)
def _finish_get_tab_list(self, headers, content):
self._get_tab_list_pending = False
resp = json.loads(content)
print "content=%s"%content
# print resp
def _on_close(self):
log1("chrome connection was closed. chrome processes won't be available.")
self._session = None
if __name__ == "__main__":
  # Manual test driver: connect to a Chrome started with --remote-shell-port
  # on the port given as argv[1], then run the message loop forever.
  set_loglevel(2)
  def init(*args):
    # Construct the finder on the message loop; tear the loop down on failure.
    try:
      be = ChromeTabFinder(*args)
    except:
      import traceback; traceback.print_exc();
      MessageLoop.quit()
  # for chrome, launch with chrome --remote-shell-port
  import sys
  MessageLoop.add_message(init, "localhost", int(sys.argv[1]))
  # MessageLoop.add_message(init, "localhost", 5858)
  MessageLoop.run_no_gtk(lambda: False)
  print "main done"
| {
"content_hash": "ad55bb844d992e5661621e4c1388c9c9",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 119,
"avg_line_length": 25.736263736263737,
"alnum_prop": 0.645602049530316,
"repo_name": "natduca/ndbg",
"id": "75f264f5ecf208464b513e92ceadebebf05eb6b1",
"size": "2918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "debugger/chrome_tab_finder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4728"
},
{
"name": "C++",
"bytes": "5787"
},
{
"name": "Emacs Lisp",
"bytes": "5014"
},
{
"name": "JavaScript",
"bytes": "237"
},
{
"name": "Python",
"bytes": "554374"
},
{
"name": "Shell",
"bytes": "781"
},
{
"name": "VimL",
"bytes": "1848"
}
],
"symlink_target": ""
} |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
from azure.mgmt.containerregistry.v2018_02_01_preview.models import (
Registry,
RegistryUpdateParameters,
StorageAccountProperties,
Sku,
SkuName,
SkuTier,
ProvisioningState,
PasswordName,
WebhookCreateParameters,
WebhookUpdateParameters,
WebhookAction,
WebhookStatus,
BuildTask,
SourceRepositoryProperties,
SourceControlAuthInfo,
PlatformProperties,
DockerBuildStep,
BuildTaskBuildRequest,
BuildTaskUpdateParameters,
SourceRepositoryUpdateParameters,
DockerBuildStepUpdateParameters,
SourceControlType,
TokenType,
OsType,
BuildTaskStatus,
BaseImageTriggerType,
BuildArgument,
QuickBuildRequest,
BuildType
)
import azure.mgmt.storage
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
# Shared fixtures for the ACR management-plane tests below.
DEFAULT_LOCATION = 'eastus'
DEFAULT_REPLICATION_LOCATION = 'southcentralus'
DEFAULT_WEBHOOK_SERVICE_URI = 'http://www.microsoft.com'
DEFAULT_WEBHOOK_SCOPE = 'hello-world'
DEFAULT_KEY_VALUE_PAIR = {
    'key': 'value'
}
# This token requires 'admin:repo_hook' access. Recycle the token after recording tests.
# SECURITY NOTE(review): a real-looking GitHub PAT is committed below; it must
# be revoked and future recordings should pull the token from the environment.
DEFAULT_GIT_ACCESS_TOKEN = 'f431834b9161510c40d49f0626f975a962a3c856'
DEFAULT_REPOSITORY_URL = 'https://github.com/djyou/BuildTest'
class MgmtACRTest20180201Preview(AzureMgmtTestCase):
    def setUp(self):
        """Create an ACR management client pinned to the 2018-02-01-preview API."""
        super(MgmtACRTest20180201Preview, self).setUp()
        self.client = self.create_mgmt_client(
            azure.mgmt.containerregistry.ContainerRegistryManagementClient,
            api_version='2018-02-01-preview'
        )
    @ResourceGroupPreparer(location=DEFAULT_LOCATION)
    def test_managed_registry(self, resource_group, location):
        """End-to-end create/update/credentials/delete scenario for a registry."""
        registry_name = self.get_resource_name('pyacr')
        # The registry name must be globally available before creation.
        name_status = self.client.registries.check_name_availability(registry_name)
        self.assertTrue(name_status.name_available)
        # Create a managed registry
        self._create_managed_registry(registry_name, resource_group.name, location)
        self._core_registry_scenario(registry_name, resource_group.name)
    def _core_registry_scenario(self, registry_name, resource_group_name):
        """Exercise list/update/get/credentials/usages/delete on one registry."""
        registries = list(self.client.registries.list_by_resource_group(resource_group_name))
        self.assertEqual(len(registries), 1)
        # Update the registry with new tags and enable admin user
        registry = self.client.registries.update(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            registry_update_parameters=RegistryUpdateParameters(
                tags=DEFAULT_KEY_VALUE_PAIR,
                admin_user_enabled=True
            )
        ).result()
        self.assertEqual(registry.name, registry_name)
        self.assertEqual(registry.tags, DEFAULT_KEY_VALUE_PAIR)
        self.assertEqual(registry.admin_user_enabled, True)
        # A fresh GET must reflect the update.
        registry = self.client.registries.get(resource_group_name, registry_name)
        self.assertEqual(registry.name, registry_name)
        self.assertEqual(registry.tags, DEFAULT_KEY_VALUE_PAIR)
        self.assertEqual(registry.admin_user_enabled, True)
        # Admin user always carries exactly two passwords, before and after
        # regenerating one of them.
        credentials = self.client.registries.list_credentials(resource_group_name, registry_name)
        self.assertEqual(len(credentials.passwords), 2)
        credentials = self.client.registries.regenerate_credential(
            resource_group_name, registry_name, PasswordName.password)
        self.assertEqual(len(credentials.passwords), 2)
        # Usage quotas are only reported for Premium registries.
        if registry.sku.name == SkuName.premium.value:
            usages = self.client.registries.list_usages(resource_group_name, registry_name)
            self.assertTrue(len(usages.value) > 1)
        self.client.registries.delete(resource_group_name, registry_name).wait()
    def _create_managed_registry(self, registry_name, resource_group_name, location):
        """Create a Premium-SKU registry and assert its initial properties."""
        registry = self.client.registries.create(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            registry=Registry(
                location=location,
                sku=Sku(
                    name=SkuName.premium
                )
            )
        ).result()
        self.assertEqual(registry.name, registry_name)
        self.assertEqual(registry.location, location)
        self.assertEqual(registry.sku.name, SkuName.premium.value)
        self.assertEqual(registry.sku.tier, SkuTier.premium.value)
        self.assertEqual(registry.provisioning_state, ProvisioningState.succeeded.value)
        # Managed registries start with the admin user off and no classic
        # storage account.
        self.assertEqual(registry.admin_user_enabled, False)
        self.assertEqual(registry.storage_account, None)
    @ResourceGroupPreparer(location=DEFAULT_LOCATION)
    def test_webhook(self, resource_group, location):
        """Create/update/get/ping/list-events/delete a registry webhook."""
        registry_name = self.get_resource_name('pyacr')
        webhook_name = self.get_resource_name('pyacr')
        # Create a managed registry
        self._create_managed_registry(registry_name, resource_group.name, location)
        # Create a webhook
        webhook = self.client.webhooks.create(
            resource_group_name=resource_group.name,
            registry_name=registry_name,
            webhook_name=webhook_name,
            webhook_create_parameters=WebhookCreateParameters(
                location=location,
                service_uri=DEFAULT_WEBHOOK_SERVICE_URI,
                actions=[WebhookAction.push]
            )
        ).result()
        self.assertEqual(webhook.name, webhook_name)
        self.assertEqual(webhook.location, location)
        self.assertEqual(webhook.provisioning_state, ProvisioningState.succeeded.value)
        self.assertEqual(webhook.actions, [WebhookAction.push.value])
        self.assertEqual(webhook.status, WebhookStatus.enabled.value)
        webhooks = list(self.client.webhooks.list(resource_group.name, registry_name))
        self.assertEqual(len(webhooks), 1)
        # Update the webhook with custom headers, scope, and new tags
        webhook = self.client.webhooks.update(
            resource_group_name=resource_group.name,
            registry_name=registry_name,
            webhook_name=webhook_name,
            webhook_update_parameters=WebhookUpdateParameters(
                tags=DEFAULT_KEY_VALUE_PAIR,
                custom_headers=DEFAULT_KEY_VALUE_PAIR,
                scope=DEFAULT_WEBHOOK_SCOPE
            )
        ).result()
        self.assertEqual(webhook.name, webhook_name)
        self.assertEqual(webhook.tags, DEFAULT_KEY_VALUE_PAIR)
        self.assertEqual(webhook.scope, DEFAULT_WEBHOOK_SCOPE)
        # A fresh GET must reflect the update.
        webhook = self.client.webhooks.get(resource_group.name, registry_name, webhook_name)
        self.assertEqual(webhook.name, webhook_name)
        self.assertEqual(webhook.tags, DEFAULT_KEY_VALUE_PAIR)
        self.assertEqual(webhook.scope, DEFAULT_WEBHOOK_SCOPE)
        # Callback config (service URI + headers) is exposed via its own call.
        webhook_config = self.client.webhooks.get_callback_config(
            resource_group.name,
            registry_name,
            webhook_name
        )
        self.assertEqual(webhook_config.service_uri, DEFAULT_WEBHOOK_SERVICE_URI)
        self.assertEqual(webhook_config.custom_headers, DEFAULT_KEY_VALUE_PAIR)
        # Smoke-test ping and event listing, then clean up.
        self.client.webhooks.ping(resource_group.name, registry_name, webhook_name)
        self.client.webhooks.list_events(resource_group.name, registry_name, webhook_name)
        self.client.webhooks.delete(resource_group.name, registry_name, webhook_name).wait()
        self.client.registries.delete(resource_group.name, registry_name).wait()
    @ResourceGroupPreparer(location=DEFAULT_LOCATION)
    def test_replication(self, resource_group, location):
        """Create/update/get/delete a geo-replication of a Premium registry."""
        registry_name = self.get_resource_name('pyacr')
        # Replications are named after their target region.
        replication_name = DEFAULT_REPLICATION_LOCATION
        # Create a managed registry
        self._create_managed_registry(registry_name, resource_group.name, location)
        # Create a replication
        replication = self.client.replications.create(
            resource_group_name=resource_group.name,
            registry_name=registry_name,
            replication_name=replication_name,
            location=DEFAULT_REPLICATION_LOCATION
        ).result()
        self.assertEqual(replication.name, replication_name)
        self.assertEqual(replication.location, DEFAULT_REPLICATION_LOCATION)
        self.assertEqual(replication.provisioning_state, ProvisioningState.succeeded.value)
        replications = list(self.client.replications.list(resource_group.name, registry_name))
        self.assertEqual(len(replications), 2) # 2 because a replication in home region is auto created
        # Update the replication with new tags
        replication = self.client.replications.update(
            resource_group_name=resource_group.name,
            registry_name=registry_name,
            replication_name=replication_name,
            tags=DEFAULT_KEY_VALUE_PAIR
        ).result()
        self.assertEqual(replication.name, replication_name)
        self.assertEqual(replication.tags, DEFAULT_KEY_VALUE_PAIR)
        # A fresh GET must reflect the update.
        replication = self.client.replications.get(resource_group.name, registry_name, replication_name)
        self.assertEqual(replication.name, replication_name)
        self.assertEqual(replication.tags, DEFAULT_KEY_VALUE_PAIR)
        self.client.replications.delete(resource_group.name, registry_name, replication_name).wait()
        self.client.registries.delete(resource_group.name, registry_name).wait()
    def _create_build_task(self, build_task_name, registry_name, resource_group_name, location):
        """Create a GitHub-triggered Linux build task and assert its properties."""
        build_task_create_parameters = BuildTask(
            location=location,
            alias=build_task_name,
            source_repository=SourceRepositoryProperties(
                source_control_type=SourceControlType.github,
                repository_url=DEFAULT_REPOSITORY_URL,
                is_commit_trigger_enabled=True,
                source_control_auth_properties=SourceControlAuthInfo(
                    token=DEFAULT_GIT_ACCESS_TOKEN,
                    token_type=TokenType.pat,
                    refresh_token='',
                    scope='repo',
                    expires_in=1313141
                )
            ),
            platform=PlatformProperties(os_type=OsType.linux, cpu=1),
            status=BuildTaskStatus.enabled
        )
        build_task = self.client.build_tasks.create(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            build_task_name=build_task_name,
            build_task_create_parameters=build_task_create_parameters
        ).result()
        self.assertEqual(build_task.name, build_task_name)
        self.assertEqual(build_task.location, location)
        self.assertEqual(build_task.platform.os_type, OsType.linux.value)
        self.assertEqual(build_task.platform.cpu, 1)
        self.assertEqual(build_task.provisioning_state, ProvisioningState.succeeded.value)
        self.assertEqual(build_task.status, BuildTaskStatus.enabled.value)
        self.assertEqual(build_task.source_repository.repository_url, DEFAULT_REPOSITORY_URL)
        self.assertEqual(build_task.source_repository.source_control_type, SourceControlType.github.value)
        self.assertEqual(build_task.source_repository.is_commit_trigger_enabled, True)
    def _create_build_step(self, build_step_name, build_task_name, registry_name, resource_group_name, location):
        """Attach a Docker build step to a build task and assert its properties."""
        docker_build_step = DockerBuildStep(
            branch='main',
            image_names=['repo:tag'],
            is_push_enabled=True,
            no_cache=False,
            docker_file_path='Dockerfile',
            build_arguments=[],
            base_image_trigger=BaseImageTriggerType.runtime
        )
        build_step = self.client.build_steps.create(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            build_task_name=build_task_name,
            step_name=build_step_name,
            properties=docker_build_step
        ).result()
        # The service must echo back every property we configured.
        self.assertEqual(build_step.name, build_step_name)
        self.assertEqual(build_step.properties.branch, 'main')
        self.assertEqual(build_step.properties.image_names, ['repo:tag'])
        self.assertEqual(build_step.properties.is_push_enabled, True)
        self.assertEqual(build_step.properties.no_cache, False)
        self.assertEqual(build_step.properties.docker_file_path, 'Dockerfile')
        self.assertEqual(build_step.properties.build_arguments, [])
        self.assertEqual(build_step.properties.base_image_trigger, BaseImageTriggerType.runtime.value)
        self.assertEqual(build_step.properties.provisioning_state, ProvisioningState.succeeded.value)
@ResourceGroupPreparer(location=DEFAULT_LOCATION)
def _disabled_test_build_task(self, resource_group, location):
registry_name = self.get_resource_name('pyacr')
build_task_name = self.get_resource_name('pyacr')
# Create a managed registry
self._create_managed_registry(registry_name, resource_group.name, location)
# Create a build task
self._create_build_task(build_task_name, registry_name, resource_group.name, location)
# List build tasks
build_tasks = list(self.client.build_tasks.list(resource_group.name, registry_name))
self.assertEqual(len(build_tasks), 1)
# Get the build task source repository properties
source_repository_properties = self.client.build_tasks.list_source_repository_properties(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_task_name=build_task_name)
self.assertEqual(source_repository_properties.repository_url, DEFAULT_REPOSITORY_URL)
self.assertEqual(source_repository_properties.source_control_type, SourceControlType.github.value)
self.assertEqual(source_repository_properties.is_commit_trigger_enabled, True)
self.assertEqual(source_repository_properties.source_control_type, SourceControlType.github.value)
self.assertEqual(source_repository_properties.source_control_auth_properties.token, DEFAULT_GIT_ACCESS_TOKEN)
self.assertEqual(source_repository_properties.source_control_auth_properties.token_type, TokenType.pat)
# Update the build task
build_task_update_parameters = BuildTaskUpdateParameters(
alias=build_task_name,
source_repository=SourceRepositoryUpdateParameters(
is_commit_trigger_enabled=False
),
platform=PlatformProperties(os_type=OsType.windows, cpu=1),
status=BuildTaskStatus.disabled,
timeout=10000
)
build_task = self.client.build_tasks.update(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_task_name=build_task_name,
build_task_update_parameters=build_task_update_parameters
).result()
self.assertEqual(build_task.name, build_task_name)
self.assertEqual(build_task.location, location)
self.assertEqual(build_task.platform.os_type, OsType.windows.value)
self.assertEqual(build_task.platform.cpu, 1)
self.assertEqual(build_task.provisioning_state, ProvisioningState.succeeded.value)
self.assertEqual(build_task.status, BuildTaskStatus.disabled.value)
self.assertEqual(build_task.timeout, 10000)
self.assertEqual(build_task.source_repository.repository_url, DEFAULT_REPOSITORY_URL)
self.assertEqual(build_task.source_repository.source_control_type, SourceControlType.github.value)
self.assertEqual(build_task.source_repository.is_commit_trigger_enabled, False)
# Get the build task
build_task = self.client.build_tasks.get(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_task_name=build_task_name)
self.assertEqual(build_task.name, build_task_name)
self.assertEqual(build_task.location, location)
self.assertEqual(build_task.platform.os_type, OsType.windows.value)
self.assertEqual(build_task.platform.cpu, 1)
self.assertEqual(build_task.provisioning_state, ProvisioningState.succeeded.value)
self.assertEqual(build_task.status, BuildTaskStatus.disabled.value)
self.assertEqual(build_task.timeout, 10000)
self.assertEqual(build_task.source_repository.repository_url, DEFAULT_REPOSITORY_URL)
self.assertEqual(build_task.source_repository.source_control_type, SourceControlType.github.value)
self.assertEqual(build_task.source_repository.is_commit_trigger_enabled, False)
# Delete the build task
self.client.build_tasks.delete(resource_group.name, registry_name, build_task_name).wait()
self.client.registries.delete(resource_group.name, registry_name).wait()
@ResourceGroupPreparer(location=DEFAULT_LOCATION)
def _disabled_test_build_step(self, resource_group, location):
registry_name = self.get_resource_name('pyacr')
build_task_name = self.get_resource_name('pyacr')
build_step_name = self.get_resource_name('pyacr')
# Create a managed registry
self._create_managed_registry(registry_name, resource_group.name, location)
# Create a build task
self._create_build_task(build_task_name, registry_name, resource_group.name, location)
# Create a build step
self._create_build_step(build_step_name, build_task_name, registry_name, resource_group.name, location)
# List build steps
build_steps = list(self.client.build_steps.list(resource_group.name, registry_name, build_task_name))
self.assertEqual(len(build_steps), 1)
# Update the build step
build_step_update_parameters = DockerBuildStepUpdateParameters(
branch='dev',
image_names=['repo1:tag1', 'repo2:tag2'],
is_push_enabled=False,
no_cache=True,
docker_file_path='src\Dockerfile',
build_arguments=[
BuildArgument(name='key1', value='value1', is_secret=False),
BuildArgument(name='key2', value='value2', is_secret=True)
],
base_image_trigger=BaseImageTriggerType.none
)
build_step = self.client.build_steps.update(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_task_name=build_task_name,
step_name=build_step_name,
properties=build_step_update_parameters
).result()
self.assertEqual(build_step.name, build_step_name)
self.assertEqual(build_step.properties.branch, 'dev')
self.assertEqual(build_step.properties.image_names, ['repo1:tag1', 'repo2:tag2'])
self.assertEqual(build_step.properties.is_push_enabled, False)
self.assertEqual(build_step.properties.no_cache, True)
self.assertEqual(build_step.properties.docker_file_path, 'src\Dockerfile')
self.assertEqual(build_step.properties.build_arguments[0].name, 'key1')
self.assertEqual(build_step.properties.build_arguments[0].value, 'value1')
self.assertEqual(build_step.properties.build_arguments[0].is_secret, False)
self.assertEqual(build_step.properties.base_image_trigger, BaseImageTriggerType.none.value)
self.assertEqual(build_step.properties.provisioning_state, ProvisioningState.succeeded.value)
# Get the build step
build_step = self.client.build_steps.get(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_task_name=build_task_name,
step_name=build_step_name)
self.assertEqual(build_step.name, build_step_name)
self.assertEqual(build_step.properties.branch, 'dev')
self.assertEqual(build_step.properties.image_names, ['repo1:tag1', 'repo2:tag2'])
self.assertEqual(build_step.properties.is_push_enabled, False)
self.assertEqual(build_step.properties.no_cache, True)
self.assertEqual(build_step.properties.docker_file_path, 'src\Dockerfile')
self.assertEqual(build_step.properties.build_arguments[0].name, 'key1')
self.assertEqual(build_step.properties.build_arguments[0].value, 'value1')
self.assertEqual(build_step.properties.build_arguments[0].is_secret, False)
self.assertEqual(build_step.properties.base_image_trigger, BaseImageTriggerType.none.value)
self.assertEqual(build_step.properties.provisioning_state, ProvisioningState.succeeded.value)
# Get the build step build arguments
build_arguments = list(self.client.build_steps.list_build_arguments(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_task_name=build_task_name,
step_name=build_step_name))
self.assertEqual(len(build_arguments), 2)
# Delete the build step
self.client.build_steps.delete(resource_group.name, registry_name, build_task_name, build_step_name).wait()
self.client.build_tasks.delete(resource_group.name, registry_name, build_task_name).wait()
self.client.registries.delete(resource_group.name, registry_name).wait()
@ResourceGroupPreparer(location=DEFAULT_LOCATION)
def test_build(self, resource_group, location):
registry_name = self.get_resource_name('pyacr')
# Create a managed registry
self._create_managed_registry(registry_name, resource_group.name, location)
build_request = QuickBuildRequest(
source_location=DEFAULT_REPOSITORY_URL,
platform=PlatformProperties(os_type='Linux'),
docker_file_path='Dockerfile',
image_names=['repo:tag'],
is_push_enabled=True,
timeout=3600,
build_arguments=[])
# Get build source upload url
self.client.registries.get_build_source_upload_url(
resource_group_name=resource_group.name,
registry_name=registry_name)
# Queue a build
queued_build = self.client.registries.queue_build(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_request=build_request).result()
build_id = queued_build.build_id
# List builds
builds = list(self.client.builds.list(
resource_group_name=resource_group.name,
registry_name=registry_name))
self.assertEqual(len(builds), 1)
self.assertEqual(builds[0].build_id, build_id)
self.assertEqual(builds[0].build_type, BuildType.quick_build.value)
# Get the build
build = self.client.builds.get(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_id=build_id)
self.assertEqual(build.build_id, build_id)
self.assertEqual(build.build_type, BuildType.quick_build.value)
self.assertEqual(build.is_archive_enabled, False)
# Update the build
build = self.client.builds.update(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_id=build_id,
is_archive_enabled=True).result()
self.assertEqual(build.build_id, build_id)
self.assertEqual(build.build_type, BuildType.quick_build.value)
self.assertEqual(build.is_archive_enabled, True)
# Get log link
self.client.builds.get_log_link(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_id=build_id)
# Cancel a build
self.client.builds.cancel(
resource_group_name=resource_group.name,
registry_name=registry_name,
build_id=build_id).wait()
# Delete the registry
self.client.registries.delete(resource_group.name, registry_name).wait()
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Allow running this test module directly via the stdlib unittest runner.
    unittest.main()
| {
"content_hash": "720edcab90ba44d5be1926061addb7f3",
"timestamp": "",
"source": "github",
"line_count": 550,
"max_line_length": 117,
"avg_line_length": 44.52,
"alnum_prop": 0.670505595033897,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e8298d6a3f6081b85b801cd6ef686bba99104172",
"size": "24488",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/containerregistry/azure-mgmt-containerregistry/tests/disable_test_mgmt_containerregistry_2018_02_01_preview.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
class thisislog(object):
def __init__(self):
self.paths = []
def getCusPath(self):
with open('cusPath', 'r') as f:
for line in f:
self.paths.append(line)
def showPathOptions(self):
self.getCusPath()
for pathNo, cusPath in enumerate(self.paths):
print pathNo, ': ', cusPath
def run(self):
self.showPathOptions()
while True:
cusOption = input('Please enter your path to show: ')
if cusOption == "q":
break
print cusOption
if __name__ == '__main__':
    # Script entry point: build the browser and start the interactive loop.
    a = thisislog()
    a.run() | {
"content_hash": "5d1f793a17c851a951113aab449256dc",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 65,
"avg_line_length": 22.137931034482758,
"alnum_prop": 0.5093457943925234,
"repo_name": "m170897017/thisislog",
"id": "6705dcf27d7537048262215be4ff4c897d01a5c3",
"size": "688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Start.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "688"
}
],
"symlink_target": ""
} |
class Solution(object):
    def readBinaryWatch(self, num):
        """
        :type num: int
        :rtype: List[str]

        A binary watch lights one LED per set bit of the hour (0-11) and
        minute (0-59); a time is valid when exactly `num` LEDs are lit.

        Cleanup: the original wrapped the count in a redundant int() inside a
        lambda/map/sum chain and sliced off the '0b' prefix, but str.count
        already returns an int and '0b' contains no '1' characters.
        """
        return_list = []
        for hour in range(12):
            for minute in range(60):
                # bin() -> e.g. '0b101'; counting '1' chars counts set bits.
                if bin(hour).count('1') + bin(minute).count('1') == num:
                    # Minutes are zero-padded to two digits; hours are not.
                    return_list.append(str(hour) + ":" + str(minute).zfill(2))
        return return_list
class Solution1(object):
    def readBinaryWatch(self, num):
        """
        :type num: int
        :rtype: List[str]

        Kernighan's trick: x &= x - 1 clears the lowest set bit, so the
        number of loop iterations equals the popcount.
        """
        def popcount(value):
            bits = 0
            while value:
                value &= value - 1
                bits += 1
            return bits
        times = []
        for hour in range(12):
            for minute in range(60):
                if popcount(hour) + popcount(minute) == num:
                    times.append('{}:{}'.format(str(hour), str(minute).zfill(2)))
        return times
class Solution2(object):
    # Comprehension-only variant of Solution1 (same Kernighan popcount helper).
    def readBinaryWatch(self, num):
        """
        :type num: int
        :rtype: List[str]
        """
        def bit_count(binnum):
            # x &= x - 1 clears the lowest set bit; iterations == popcount.
            count = 0
            while binnum:
                binnum &= binnum - 1
                count += 1
            return count
        # Hours run 0-11, minutes 0-59; minutes are zero-padded to two digits.
        return ['{}:{}'.format(str(hour), str(minute).zfill(2)) for hour in range(12) for minute in range(60) if bit_count(hour) + bit_count(minute) == num] | {
"content_hash": "b688a3c95d4bcb7098375ff629841f1a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 156,
"avg_line_length": 31.26,
"alnum_prop": 0.4708893154190659,
"repo_name": "aenon/OnlineJudge",
"id": "c02e51b27407f77c28270d1febc6c14266ced00f",
"size": "2498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/5.BitManipulation/401.BinaryWatch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "18264"
},
{
"name": "Jupyter Notebook",
"bytes": "8287"
},
{
"name": "Python",
"bytes": "24381"
}
],
"symlink_target": ""
} |
import ast
import os
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline, NcbitblastnCommandline, NcbiblastpCommandline, \
NcbiblastxCommandline
from django.shortcuts import render
from blastplus import utils
from blastplus.forms import BlastForm, TBlastnForm, BlastpForm, BlastxForm
from blastplus.settings import BLAST_CORRECT_PARAMS
from blastplus.settings import BLAST_DB_NUCL_LIST
from blastplus.settings import EVALUE_BLAST_DEFAULT, BLAST_MAX_NUMBER_SEQ_IN_INPUT
from blastplus.settings import EXAMPLE_FASTA_NUCL_FILE_PATH, EXAMPLE_FASTA_PROT_FILE_PATH
def blast(request, blast_form, template_init, template_result, blast_commandline, sample_fasta_path,
          extra_context=None):
    """
    Process blastn/tblastn (blast+) query or set up initial blast form.

    :param request: Django HTTP request.
    :param blast_form: form class used to validate the submitted query.
    :param template_init: template rendered for the initial (GET) form.
    :param template_result: template rendered with search results.
    :param blast_commandline: Biopython blast+ command-line wrapper class.
    :param sample_fasta_path: path of the sample sequence offered in the form.
    :param extra_context: optional callable applied to the parsed blast
        records before rendering (e.g. to join with an external database).

    Fix: the bare ``except: pass`` around the optional matrix field has been
    narrowed to ``except KeyError`` so real errors are no longer swallowed.
    """
    if request.method == 'POST':
        form = blast_form(request.POST)
        if form.is_valid():
            query_file_object_tmp = form.cleaned_data['sequence_in_form']
            evalue = float(form.cleaned_data['evalue_in_form'])
            word_size = int(form.cleaned_data['word_size_in_form'])
            database_path = str(form.cleaned_data['blast_db_in_form'])

            standard_opt_dic = {'query': query_file_object_tmp, 'evalue': evalue, 'outfmt': 5, 'db': database_path,
                                'word_size': word_size}

            annotated = utils.get_annotation(database_path, BLAST_DB_NUCL_LIST)

            # none standard options: only some forms expose a scoring matrix.
            try:
                matrix = str(form.cleaned_data['matrix_in_form'])
                standard_opt_dic["matrix"] = matrix
            except KeyError:
                # Form without a matrix field - run without one.
                pass

            sensitivity_opt_dic = ast.literal_eval(str(form.cleaned_data['search_sensitivity_in_form']))

            blast_records__file_xml = None
            try:
                # blast search, parse results from temp file, put them into template for rendering.
                blast_records__file_xml, blast_error = utils.run_blast_commands(blast_commandline,
                                                                               **dict(standard_opt_dic,
                                                                                      **sensitivity_opt_dic))

                if len(blast_error) > 0:
                    # NOTE(review): "blast_error" is a *variable* used as the
                    # context key here; the literal key 'blast_error' was
                    # probably intended - confirm against the result template
                    # before changing.
                    return render(request=request, template_name=template_result,
                                  context={"blast_record": '', blast_error: BLAST_CORRECT_PARAMS})
                else:
                    # converts blast results into objects and pack into list
                    blast_records_in_object_and_list = utils.blast_records_to_object(
                        list(NCBIXML.parse(blast_records__file_xml)))

                    # user defined function to modify blast results
                    # e.g. join blast results with external database in template
                    if extra_context is not None:
                        blast_records_in_object_and_list = extra_context(blast_records_in_object_and_list)

                    return render(request=request, template_name=template_result,
                                  context={'application': blast_records_in_object_and_list[0].application,
                                           'version': blast_records_in_object_and_list[0].version,
                                           'blast_records': blast_records_in_object_and_list,
                                           'annotated': annotated})
            finally:
                # remove result - temporary file
                if blast_records__file_xml is not None:
                    os.remove(blast_records__file_xml.name)
    else:
        form = blast_form(initial={'sequence_in_form': '', 'evalue_in_form': EVALUE_BLAST_DEFAULT})

    # GET request, or invalid POST: (re-)render the form.
    return render(request=request, template_name=template_init,
                  context={'form': form, 'sequence_sample_in_fasta': utils.get_sample_data(sample_fasta_path),
                           "blast_max_number_seq_in_input": BLAST_MAX_NUMBER_SEQ_IN_INPUT, })
def tblastn(request, blast_form=TBlastnForm, template_init='blastplus/blast.html',
            template_result='blastplus/blast_results.html', extra_context=None):
    """Thin view wrapper: delegates to blast() with the tblastn command line
    and the protein sample sequence."""
    view_options = dict(
        blast_form=blast_form,
        template_init=template_init,
        template_result=template_result,
        blast_commandline=NcbitblastnCommandline,
        sample_fasta_path=EXAMPLE_FASTA_PROT_FILE_PATH,
        extra_context=extra_context,
    )
    return blast(request, **view_options)
def blastn(request, blast_form=BlastForm, template_init='blastplus/blast.html',
           template_result='blastplus/blast_results.html', extra_context=None):
    """Thin view wrapper: delegates to blast() with the blastn command line
    and the nucleotide sample sequence."""
    view_options = dict(
        blast_form=blast_form,
        template_init=template_init,
        template_result=template_result,
        blast_commandline=NcbiblastnCommandline,
        sample_fasta_path=EXAMPLE_FASTA_NUCL_FILE_PATH,
        extra_context=extra_context,
    )
    return blast(request, **view_options)
def blastp(request, blast_form=BlastpForm, template_init='blastplus/blast.html',
           template_result='blastplus/blast_results.html', extra_context=None):
    """Thin view wrapper: delegates to blast() with the blastp command line
    and the protein sample sequence."""
    view_options = dict(
        blast_form=blast_form,
        template_init=template_init,
        template_result=template_result,
        blast_commandline=NcbiblastpCommandline,
        sample_fasta_path=EXAMPLE_FASTA_PROT_FILE_PATH,
        extra_context=extra_context,
    )
    return blast(request, **view_options)
def blastx(request, blast_form=BlastxForm, template_init='blastplus/blast.html',
           template_result='blastplus/blast_results.html', extra_context=None):
    """Thin view wrapper: delegates to blast() with the blastx command line
    and the nucleotide sample sequence."""
    view_options = dict(
        blast_form=blast_form,
        template_init=template_init,
        template_result=template_result,
        blast_commandline=NcbiblastxCommandline,
        sample_fasta_path=EXAMPLE_FASTA_NUCL_FILE_PATH,
        extra_context=extra_context,
    )
    return blast(request, **view_options)
| {
"content_hash": "2d48ae040afa7e20fad19e24bcf9eca5",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 115,
"avg_line_length": 50.58771929824562,
"alnum_prop": 0.620253164556962,
"repo_name": "michal-stuglik/django-blastplus",
"id": "eecc77e736b42365a43ca8375727d08a4116b9ae",
"size": "5767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blastplus/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "682"
},
{
"name": "HTML",
"bytes": "9246"
},
{
"name": "Makefile",
"bytes": "277"
},
{
"name": "Python",
"bytes": "34210"
},
{
"name": "Shell",
"bytes": "298"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import bisect
from .gtfsfactoryuser import GtfsFactoryUser
from . import problems as problems_module
from . import util
class Shape(GtfsFactoryUser):
  """This class represents a geographic shape that corresponds to the route
  taken by one or more Trips."""
  _REQUIRED_FIELD_NAMES = ['shape_id', 'shape_pt_lat', 'shape_pt_lon',
                           'shape_pt_sequence']
  _FIELD_NAMES = _REQUIRED_FIELD_NAMES + ['shape_dist_traveled']
  _DEPRECATED_FIELD_NAMES = []
  def __init__(self, shape_id):
    """Create an empty shape identified by shape_id."""
    # List of shape point tuple (lat, lng, shape_dist_traveled), where lat and
    # lon is the location of the shape point, and shape_dist_traveled is an
    # increasing metric representing the distance traveled along the shape.
    self.points = []
    # An ID that uniquely identifies a shape in the dataset.
    self.shape_id = shape_id
    # The max shape_dist_traveled of shape points in this shape.
    self.max_distance = 0
    # List of shape_dist_traveled of each shape point.
    self.distance = []
    # List of shape_pt_sequence of each shape point.
    self.sequence = []
  def AddPoint(self, lat, lon, distance=None,
               problems=problems_module.default_problem_reporter):
    """Build a ShapePoint from the raw values and insert it if it parses.

    Validation problems are reported through the given problem reporter.
    """
    shapepoint_class = self.GetGtfsFactory().ShapePoint
    shapepoint = shapepoint_class(
        self.shape_id, lat, lon, len(self.sequence), distance)
    if shapepoint.ParseAttributes(problems):
      self.AddShapePointObjectUnsorted(shapepoint, problems)
  def AddShapePointObjectUnsorted(self, shapepoint, problems):
    """Insert a point into a correct position by sequence. """
    # Fast paths for append/prepend; otherwise bisect finds the insert index.
    if (len(self.sequence) == 0 or
        shapepoint.shape_pt_sequence >= self.sequence[-1]):
      index = len(self.sequence)
    elif shapepoint.shape_pt_sequence <= self.sequence[0]:
      index = 0
    else:
      index = bisect.bisect(self.sequence, shapepoint.shape_pt_sequence)
    # A duplicate sequence number is reported, but the point is still inserted.
    if shapepoint.shape_pt_sequence in self.sequence:
      problems.InvalidValue('shape_pt_sequence', shapepoint.shape_pt_sequence,
                            'The sequence number %d occurs more than once in '
                            'shape %s.' %
                            (shapepoint.shape_pt_sequence, self.shape_id))
    if shapepoint.shape_dist_traveled is not None and len(self.sequence) > 0:
      # shape_dist_traveled must be non-decreasing relative to both the
      # successor and the predecessor of the insertion position.
      if (index != len(self.sequence) and
          shapepoint.shape_dist_traveled > self.distance[index]):
        problems.InvalidValue('shape_dist_traveled',
                              shapepoint.shape_dist_traveled,
                              'Each subsequent point in a shape should have '
                              'a distance value that shouldn\'t be larger '
                              'than the next ones. In this case, the next '
                              'distance was %f.' % self.distance[index])
      if (index > 0 and
          shapepoint.shape_dist_traveled < self.distance[index - 1]):
        problems.InvalidValue('shape_dist_traveled',
                              shapepoint.shape_dist_traveled,
                              'Each subsequent point in a shape should have '
                              'a distance value that\'s at least as large as '
                              'the previous ones. In this case, the previous '
                              'distance was %f.' % self.distance[index - 1])
      if shapepoint.shape_dist_traveled > self.max_distance:
        self.max_distance = shapepoint.shape_dist_traveled
    # Keep the three parallel lists in sync at the same index.
    self.sequence.insert(index, shapepoint.shape_pt_sequence)
    self.distance.insert(index, shapepoint.shape_dist_traveled)
    self.points.insert(index, (shapepoint.shape_pt_lat,
                               shapepoint.shape_pt_lon,
                               shapepoint.shape_dist_traveled))
  def ClearPoints(self):
    """Drop all points. Note: only self.points is reset here."""
    self.points = []
  def __eq__(self, other):
    """Two shapes are equal iff their point lists are equal."""
    if not other:
      return False
    if id(self) == id(other):
      return True
    return self.points == other.points
  def __ne__(self, other):
    return not self.__eq__(other)
  def __repr__(self):
    return "<Shape %s>" % self.__dict__
  def ValidateShapeId(self, problems):
    """Report a missing or empty shape_id."""
    if util.IsEmpty(self.shape_id):
      problems.MissingValue('shape_id')
  def ValidateShapePoints(self, problems):
    """Warn (not error) when the shape contains no points at all."""
    if not self.points:
      problems.OtherProblem('The shape with shape_id "%s" contains no points.' %
                            self.shape_id, type=problems_module.TYPE_WARNING)
  def Validate(self, problems=problems_module.default_problem_reporter):
    """Run all shape-level validations against the given problem reporter."""
    self.ValidateShapeId(problems)
    self.ValidateShapePoints(problems)
  def GetPointWithDistanceTraveled(self, shape_dist_traveled):
    """Returns a point on the shape polyline with the input shape_dist_traveled.
    Args:
      shape_dist_traveled: The input shape_dist_traveled.
    Returns:
      The shape point as a tuple (lat, lng, shape_dist_traveled), where lat and
      lng is the location of the shape point, and shape_dist_traveled is an
      increasing metric representing the distance traveled along the shape.
      Returns None if there is data error in shape.
    """
    if not self.distance:
      return None
    # Requests outside the recorded range are clamped to the endpoints.
    if shape_dist_traveled <= self.distance[0]:
      return self.points[0]
    if shape_dist_traveled >= self.distance[-1]:
      return self.points[-1]
    index = bisect.bisect(self.distance, shape_dist_traveled)
    (lat0, lng0, dist0) = self.points[index - 1]
    (lat1, lng1, dist1) = self.points[index]
    # Interpolate if shape_dist_traveled does not equal to any of the point
    # in shape segment.
    # (lat0, lng0) (lat, lng) (lat1, lng1)
    # -----|--------------------|---------------------|------
    # dist0 shape_dist_traveled dist1
    # \------- ca --------/ \-------- bc -------/
    # \----------------- ba ------------------/
    ca = shape_dist_traveled - dist0
    bc = dist1 - shape_dist_traveled
    ba = bc + ca
    if ba == 0:
      # This only happens when there's data error in shapes and should have been
      # catched before. Check to avoid crash.
      return None
    # This won't work crossing longitude 180 and is only an approximation which
    # works well for short distance.
    lat = (lat1 * ca + lat0 * bc) / ba
    lng = (lng1 * ca + lng0 * bc) / ba
    return (lat, lng, shape_dist_traveled)
| {
"content_hash": "79540102a7d5f100591a4c26fb3ca814",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 80,
"avg_line_length": 41.38961038961039,
"alnum_prop": 0.614841543771572,
"repo_name": "avilaton/transitfeed",
"id": "35cfb2075c5f58fc211c80bb23decccc5f01d809",
"size": "6977",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "transitfeed/shape.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2638"
},
{
"name": "HTML",
"bytes": "2164"
},
{
"name": "JavaScript",
"bytes": "108520"
},
{
"name": "Python",
"bytes": "1039665"
},
{
"name": "Visual Basic",
"bytes": "357"
}
],
"symlink_target": ""
} |
import os
import sys
import re
import inspect
import cgi
# Only files with these extensions are scanned for docublocks.
validExtensions = (".cpp", ".h", ".js")
# specify the paths in which docublocks are searched. note that js/apps/* must not be included because it contains js/apps/system/
# and that path also contains copies of some files present in js/ anyway.
searchPaths = ["arangod/", "lib/", "js/actions", "js/client", "js/apps/system/_system/cerberus", "js/apps/system/_api/gharial", "js/common", "js/server"]
# Global success flag; cleared by fetch_comments() when a generated example is missing.
fullSuccess = True
def file_content(filepath):
    """Collect @startDocuBlock ... @endDocuBlock spans from a source file.

    Returns a list with one tuple of raw lines (newlines included) per
    docublock, spanning from the start marker line through the end marker
    line inclusive.
    """
    # "with" guarantees the handle is closed even if reading raises
    # (the original opened/closed manually and leaked on error).
    with open(filepath, 'r') as infile:
        filelines = tuple(infile)
    comment_indexes = []
    comments = []
    for lineno, text in enumerate(filelines):
        if "@startDocuBlock" in text:
            _start = lineno
        if "@endDocuBlock" in text:
            # Slice end is exclusive, so +1 keeps the end-marker line.
            _end = lineno + 1
            # NOTE(review): an @endDocuBlock with no preceding
            # @startDocuBlock raises NameError here, same as before.
            comment_indexes.append([_start, _end])
    for index in comment_indexes:
        comments.append(filelines[index[0]: index[1]])
    return comments
def example_content(filepath, fh, tag):
    """ Fetches an example file and inserts it using code

    Writes the example as an HTML long/short toggle pair into fh.  The
    first line of the file decides the flavor: "arangosh>" output or a
    "shell> curl" transcript.  NOTE(review): this script targets Python 2;
    cgi.escape was removed in Python 3.8, and the local name "long"
    shadows the Python 2 builtin.
    """
    arangosh = False
    curl = False
    first = True
    lastline = None
    long = ""
    longLines = 0
    short = ""
    shortLines = 0
    shortable = False
    showdots = True
    # States of the small curl transcript parser below.
    CURL_STATE_CMD = 1
    CURL_STATE_HEADER = 2
    CURL_STATE_BODY = 3
    # read in the context, split into long and short
    infile = open(filepath, 'r')
    for line in infile:
        if first:
            # The first line determines which parser handles the file.
            arangosh = line.startswith("arangosh>")
            curl = line.startswith("shell> curl")
            first = False
        if arangosh:
            # Prompt/continuation lines go into the short version; result
            # lines are elided there (making the example "shortable").
            if line.startswith("arangosh>") or line.startswith("........>"):
                if lastline != None:
                    # short = short + lastline
                    # shortLines = shortLines + 1
                    lastline = None
                short = short + line
                shortLines = shortLines + 1
                showdots = True
            else:
                if showdots:
                    if lastline == None:
                        # lastline = line
                        shortable = True
                        showdots = False
                        lastline = None
                    else:
                        # short = short + "~~~hidden~~~\n"
                        # shortLines = shortLines + 1
                        shortable = True
                        showdots = False
                        lastline = None
        if curl:
            # Track whether we are in the command, header or body section.
            if line.startswith("shell> curl"):
                curlState = CURL_STATE_CMD
            elif curlState == CURL_STATE_CMD and line.startswith("HTTP/1.1 "):
                curlState = CURL_STATE_HEADER
            elif curlState == CURL_STATE_HEADER and line.startswith("{"):
                curlState = CURL_STATE_BODY
            # Command and header lines appear in the short version,
            # HTML-escaped; the body is only shown in the long version.
            if curlState == CURL_STATE_CMD or curlState == CURL_STATE_HEADER:
                line = cgi.escape(line)
                short = short + line
                shortLines = shortLines + 1
            else:
                shortable = True
        # The long version always gets every line.
        long = long + line
        longLines = longLines + 1
    if lastline != None:
        short = short + lastline
        shortLines = shortLines + 1
    infile.close()
    # Not worth a toggle if fewer than five lines would be hidden.
    if longLines - shortLines < 5:
        shortable = False
    # write example
    fh.write("\n")
    fh.write("<div id=\"%s_container\">\n" % tag)
    longTag = "%s_long" % tag
    shortTag = "%s_short" % tag
    longToggle = ""
    shortToggle = "$('#%s').hide(); $('#%s').show();" % (shortTag, longTag)
    # Long version: hidden initially when a short version will be shown.
    if shortable:
        fh.write("<div id=\"%s\" onclick=\"%s\" style=\"Display: none;\">\n" % (longTag, longToggle))
    else:
        fh.write("<div id=\"%s\">\n" % longTag)
    fh.write("<pre>\n")
    # fh.write("```\n")
    fh.write("%s" % long)
    # fh.write("```\n")
    fh.write("</pre>\n")
    fh.write("</div>\n")
    # Short version: clicking it reveals the long version.
    if shortable:
        fh.write("<div id=\"%s\" onclick=\"%s\">\n" % (shortTag, shortToggle))
        fh.write("<pre>\n")
        # fh.write("```\n")
        fh.write("%s" % short)
        # fh.write("```\n")
        if arangosh:
            fh.write("</pre><div class=\"example_show_button\">show execution results</div>\n")
        elif curl:
            fh.write("</pre><div class=\"example_show_button\">show response body</div>\n")
        else:
            fh.write("</pre><div class=\"example_show_button\">show</div>\n")
        fh.write("</div>\n")
    fh.write("</div>\n")
    fh.write("\n")
def fetch_comments(dirpath):
    """ Fetches comments from files and writes to a file in required format.

    Walks dirpath, extracts docublock comment spans from matching files,
    strips the '///' decoration, inlines generated examples, and appends
    everything to allComments.txt.  Sets the global fullSuccess to False
    when a referenced generated example file is missing.
    """
    global fullSuccess
    global validExtensions
    comments_filename = "allComments.txt"
    # Append mode: the __main__ block already seeded the file with errorCodes.
    fh = open(comments_filename, "a")
    shouldIgnoreLine = False;
    for root, directories, files in os.walk(dirpath):
        for filename in files:
            if filename.endswith(validExtensions):
                filepath = os.path.join(root, filename)
                file_comments = file_content(filepath)
                for comment in file_comments:
                    fh.write("\n<!-- filename: %s -->\n" % filename)
                    for _com in comment:
                        # Strip the C++ '///' comment decoration while keeping
                        # list markers and their indentation.
                        _text = re.sub(r"//(/)+\s*\n", "<br />", _com)
                        _text = re.sub(r"///+(\s+\s+)([-\*\d])", r" \2", _text)
                        _text = re.sub(r"///\s", "", _text)
                        _text = _text.strip("\n")
                        if _text:
                            if not shouldIgnoreLine:
                                if ("@startDocuBlock" in _text) or \
                                   ("@endDocuBlock" in _text):
                                    fh.write("%s\n\n" % _text)
                                elif ("@EXAMPLE_ARANGOSH_OUTPUT" in _text or \
                                      "@EXAMPLE_ARANGOSH_RUN" in _text):
                                    # Everything up to the matching @END_... is
                                    # replaced by the generated example output.
                                    shouldIgnoreLine = True
                                    _filename = re.search("{(.*)}", _text).group(1)
                                    # NOTE(review): rebinding dirpath here shadows
                                    # the function parameter.
                                    dirpath = os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir, "Examples", _filename + ".generated"))
                                    if os.path.isfile(dirpath):
                                        example_content(dirpath, fh, _filename)
                                    else:
                                        fullSuccess = False
                                        print "Could not find the generated example for " + _filename + " found in " + filepath
                                else:
                                    fh.write("%s\n" % _text)
                            elif ("@END_EXAMPLE_ARANGOSH_OUTPUT" in _text or \
                                  "@END_EXAMPLE_ARANGOSH_RUN" in _text):
                                shouldIgnoreLine = False
    fh.close()
if __name__ == "__main__":
    # Seed allComments.txt with the error-codes docublock taken from errors.dat.
    errorsFile = open("../../lib/Basics/errors.dat", "r")
    commentsFile = open("allComments.txt", "w")
    commentsFile.write("@startDocuBlock errorCodes \n")
    for line in errorsFile:
        commentsFile.write(line + "\n")
    commentsFile.write("@endDocuBlock \n")
    commentsFile.close()
    errorsFile.close()
    # Then append every docublock found under the configured search paths.
    for i in searchPaths:
        print "Searching for docublocks in " + i + ": "
        dirpath = os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir,"ArangoDB/../../"+i))
        fetch_comments(dirpath)
        # NOTE(review): the result of this call is discarded, so the line has
        # no effect - confirm whether the value was meant to be used.
        os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'templates'))
    # Non-zero exit when any generated example was missing.
    if not fullSuccess:
        sys.exit(1)
| {
"content_hash": "9003586ac4c96b71a6f61c3680ca5363",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 153,
"avg_line_length": 30.74537037037037,
"alnum_prop": 0.5699442854991719,
"repo_name": "kkdd/arangodb",
"id": "5e7d03be4edfe08aaebb14ed2382d9d2f87686f2",
"size": "6641",
"binary": false,
"copies": "3",
"ref": "refs/heads/devel",
"path": "Documentation/Books/codeBlockReader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "AppleScript",
"bytes": "1429"
},
{
"name": "Assembly",
"bytes": "142084"
},
{
"name": "Batchfile",
"bytes": "9177"
},
{
"name": "C",
"bytes": "1903833"
},
{
"name": "C#",
"bytes": "55625"
},
{
"name": "C++",
"bytes": "79280231"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "109246"
},
{
"name": "CSS",
"bytes": "1589317"
},
{
"name": "CoffeeScript",
"bytes": "94"
},
{
"name": "DIGITAL Command Language",
"bytes": "27303"
},
{
"name": "Emacs Lisp",
"bytes": "15477"
},
{
"name": "Go",
"bytes": "1018005"
},
{
"name": "Groff",
"bytes": "263567"
},
{
"name": "HTML",
"bytes": "321871"
},
{
"name": "JavaScript",
"bytes": "57408695"
},
{
"name": "LLVM",
"bytes": "39030"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "174615"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "26909"
},
{
"name": "Objective-C",
"bytes": "38849"
},
{
"name": "Objective-C++",
"bytes": "1857"
},
{
"name": "Pascal",
"bytes": "145262"
},
{
"name": "Perl",
"bytes": "227888"
},
{
"name": "Protocol Buffer",
"bytes": "5837"
},
{
"name": "Python",
"bytes": "3562620"
},
{
"name": "Ruby",
"bytes": "996387"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "19885"
},
{
"name": "Shell",
"bytes": "486603"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "Yacc",
"bytes": "35766"
}
],
"symlink_target": ""
} |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# No password configured: connect anonymously to the local JSON-RPC port.
if rpcpass == "":
	access = ServiceProxy("http://127.0.0.1:9332")
else:
	# Embed credentials in the URL for HTTP basic auth against the RPC server.
	access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
# First CLI argument selects the RPC command (case-insensitive).
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Procoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Procoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| {
"content_hash": "e135d9f23aa33fff88b520c380917b39",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 24.185185185185187,
"alnum_prop": 0.6616896375701888,
"repo_name": "Procoin-project/Procoin-Done",
"id": "c0337e22ccbdad62c224ad81ba9cd4abf7b01dc9",
"size": "7836",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "contrib/bitrpc/bitrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103297"
},
{
"name": "C++",
"bytes": "2522619"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "14692"
},
{
"name": "Objective-C",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "37260"
},
{
"name": "Shell",
"bytes": "2527"
},
{
"name": "TypeScript",
"bytes": "5232065"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform
def create_custom_job_sample(
    project: str,
    display_name: str,
    container_image_uri: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    """Create a Vertex AI custom job running the given container image.

    Uses a single worker pool of one n1-standard-4 replica with one
    NVIDIA Tesla K80 accelerator attached.
    """
    # The AI Platform services require regional API endpoints, so the
    # client must be pointed at the endpoint matching `location`.
    # The client only needs to be created once and can be reused.
    job_client = aiplatform.gapic.JobServiceClient(
        client_options={"api_endpoint": api_endpoint}
    )
    machine_spec = {
        "machine_type": "n1-standard-4",
        "accelerator_type": aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
        "accelerator_count": 1,
    }
    container_spec = {
        "image_uri": container_image_uri,
        "command": [],
        "args": [],
    }
    worker_pool = {
        "machine_spec": machine_spec,
        "replica_count": 1,
        "container_spec": container_spec,
    }
    custom_job = {
        "display_name": display_name,
        "job_spec": {"worker_pool_specs": [worker_pool]},
    }
    parent = "projects/{}/locations/{}".format(project, location)
    response = job_client.create_custom_job(parent=parent, custom_job=custom_job)
    print("response:", response)
# [END aiplatform_create_custom_job_sample]
| {
"content_hash": "9f1d786eaf8ef1c0453f313f9581ccd8",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 94,
"avg_line_length": 36.26829268292683,
"alnum_prop": 0.5447209145931405,
"repo_name": "sasha-gitg/python-aiplatform",
"id": "e15ecce26ba7d01f07ffbb314347a376111de45b",
"size": "2109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/snippets/job_service/create_custom_job_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "11216304"
},
{
"name": "Shell",
"bytes": "30838"
}
],
"symlink_target": ""
} |
from setuptools import setup
from setuptools.command.build_py import build_py as setuptools_build_py
# these lines allow the version to be specified in Makefile.private
import os
version = os.environ.get("MODULEVER", "0.0")
# find the build scripts
# Collect package data files; paths must be relative to the dls_ade package
# directory, so temporarily chdir into it while walking.
previous_dir = os.getcwd()
os.chdir("dls_ade")
build_scripts = []
for top, _dirs, names in os.walk("dlsbuild_scripts"):
    build_scripts.extend(os.path.join(top, name) for name in names
                         if name.endswith((".bat", ".sh")))
template_files = []
for template_root in ("module_templates", "cookiecutter_templates"):
    for top, _dirs, names in os.walk(template_root):
        template_files.extend(os.path.join(top, name) for name in names)
additional_files = build_scripts + template_files
os.chdir(previous_dir)
class BuildPreservingPackageDataMode(setuptools_build_py):
    """build_py variant whose package-data copy keeps file mode bits.

    Same loop as the distutils implementation, but copies with
    preserve_mode=True so the build scripts stay executable.
    """

    def build_package_data(self):
        """Copy data files into build directory, preserving permissions."""
        for _package, src_dir, build_dir, filenames in self.data_files:
            for name in filenames:
                destination = os.path.join(build_dir, name)
                self.mkpath(os.path.dirname(destination))
                self.copy_file(os.path.join(src_dir, name), destination,
                               preserve_mode=True)
setup(
    # name of the module
    name="dls_ade",
    # custom build_py that preserves permission bits on package data files
    cmdclass={
        'build_py': BuildPreservingPackageDataMode
    },
    # version: over-ridden by the release script
    version=version,
    description='DLS Controls Group Application Development Environment scripts',
    author='Diamond Light Source Controls Group',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
    ],
    license='APACHE',
    install_requires=['GitPython==2.1.8', 'python-ldap==3.1.0', 'six==1.10.0',
                      'pygelf==0.3.1', 'cookiecutter==1.6.0',
                      'python-gitlab==1.6.0'],
    packages=["dls_ade"],
    # ship the build scripts and templates gathered above as package data
    package_data={"dls_ade": additional_files},
    # define console_scripts
    entry_points={'console_scripts':
                  ['dls-changes-since-release.py = dls_ade.dls_changes_since_release:main',
                   'dls-checkout-module.py = dls_ade.dls_checkout_module:main',
                   'dls-list-branches.py = dls_ade.dls_list_branches:main',
                   'dls-list-modules.py = dls_ade.dls_list_modules:main',
                   'dls-list-releases.py = dls_ade.dls_list_releases:main',
                   'dls-logs-since-release.py = dls_ade.dls_logs_since_release:main',
                   'dls-module-contacts.py = dls_ade.dls_module_contacts:main',
                   'dls-release.py = dls_ade.dls_release:main',
                   'dls-start-new-module.py = dls_ade.dls_start_new_module:main',
                   'dls-tar-module.py = dls_ade.dls_tar_module:main']},
    include_package_data=True,
    tests_require=['nose', 'mock'],
    test_suite='nose.collector',
    zip_safe=False
)
| {
"content_hash": "0c9b6ab61b9207c614a8ad74337ba5fa",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 103,
"avg_line_length": 38.95061728395062,
"alnum_prop": 0.6167987321711569,
"repo_name": "dls-controls/dls_ade",
"id": "7895c147276026375041ab99e621a2994c159887",
"size": "3155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9112"
},
{
"name": "C++",
"bytes": "462"
},
{
"name": "Makefile",
"bytes": "6136"
},
{
"name": "Python",
"bytes": "584551"
},
{
"name": "Shell",
"bytes": "27504"
}
],
"symlink_target": ""
} |
import telnetlib
class PyNUTError( Exception ) :
    """Base class for exceptions raised by the NUT client classes."""
class PyNUTClient :
    """ Abstraction class to access NUT (Network UPS Tools) server """

    __debug       = None  # Set class to debug mode (prints everything useful for debuging...)
    __host        = None
    __port        = None
    __login       = None
    __password    = None
    __timeout     = None
    __srv_handler = None

    __version     = "1.3.0"
    __release     = "2014-06-03"

    def __init__( self, host="127.0.0.1", port=3493, login=None, password=None, debug=False, timeout=5 ) :
        """ Class initialization method

        host     : Host to connect (default to localhost)
        port     : Port where NUT listens for connections (default to 3493)
        login    : Login used to connect to NUT server (default to None for no authentication)
        password : Password used when using authentication (default to None)
        debug    : Boolean, put class in debug mode (prints everything on console, default to False)
        timeout  : Timeout used to wait for network response
        """
        self.__debug = debug

        if self.__debug :
            print( "[DEBUG] Class initialization..." )
            print( "[DEBUG]  -> Host  = %s (port %s)" % ( host, port ) )
            print( "[DEBUG]  -> Login = '%s' / '%s'" % ( login, password ) )

        self.__host     = host
        self.__port     = port
        self.__login    = login
        self.__password = password
        # Bug fix: was hard-coded to 5, silently ignoring the timeout parameter.
        self.__timeout  = timeout

        self.__connect()

    # Try to disconnect cleanly when class is deleted ;)
    def __del__( self ) :
        """ Class destructor method """
        try :
            self.__srv_handler.write( "LOGOUT\n" )
        except :
            pass

    def __connect( self ) :
        """ Connects to the defined server

        If login/pass was specified, the class tries to authenticate. An error is raised
        if something goes wrong.
        """
        if self.__debug :
            print( "[DEBUG] Connecting to host" )

        self.__srv_handler = telnetlib.Telnet( self.__host, self.__port )

        if self.__login != None :
            self.__srv_handler.write( "USERNAME %s\n" % self.__login )
            result = self.__srv_handler.read_until( "\n", self.__timeout )
            if result[:2] != "OK" :
                raise PyNUTError( result.replace( "\n", "" ) )

        if self.__password != None :
            self.__srv_handler.write( "PASSWORD %s\n" % self.__password )
            result = self.__srv_handler.read_until( "\n", self.__timeout )
            if result[:2] != "OK" :
                raise PyNUTError( result.replace( "\n", "" ) )

    def GetUPSList( self ) :
        """ Returns the list of available UPS from the NUT server

        The result is a dictionary containing 'key->val' pairs of 'UPSName' and 'UPS Description'
        """
        if self.__debug :
            print( "[DEBUG] GetUPSList from server" )

        self.__srv_handler.write( "LIST UPS\n" )
        result = self.__srv_handler.read_until( "\n" )
        if result != "BEGIN LIST UPS\n" :
            raise PyNUTError( result.replace( "\n", "" ) )

        result = self.__srv_handler.read_until( "END LIST UPS\n" )
        ups_list = {}

        # Each entry looks like: UPS <name> "<description>"
        for line in result.split( "\n" ) :
            if line[:3] == "UPS" :
                ups, desc = line[4:-1].split( '"' )
                ups_list[ ups.replace( " ", "" ) ] = desc

        return( ups_list )

    def GetUPSVars( self, ups="" ) :
        """ Get all available vars from the specified UPS

        The result is a dictionary containing 'key->val' pairs of all
        available vars.
        """
        if self.__debug :
            print( "[DEBUG] GetUPSVars called..." )

        self.__srv_handler.write( "LIST VAR %s\n" % ups )
        result = self.__srv_handler.read_until( "\n" )
        if result != "BEGIN LIST VAR %s\n" % ups :
            raise PyNUTError( result.replace( "\n", "" ) )

        ups_vars   = {}
        result     = self.__srv_handler.read_until( "END LIST VAR %s\n" % ups )
        offset     = len( "VAR %s " % ups )
        end_offset = 0 - ( len( "END LIST VAR %s\n" % ups ) + 1 )

        for current in result[:end_offset].split( "\n" ) :
            var  = current[ offset: ].split( '"' )[0].replace( " ", "" )
            data = current[ offset: ].split( '"' )[1]
            ups_vars[ var ] = data

        return( ups_vars )

    def GetUPSCommands( self, ups="" ) :
        """ Get all available commands for the specified UPS

        The result is a dict object with command name as key and a description
        of the command as value
        """
        if self.__debug :
            print( "[DEBUG] GetUPSCommands called..." )

        self.__srv_handler.write( "LIST CMD %s\n" % ups )
        result = self.__srv_handler.read_until( "\n" )
        if result != "BEGIN LIST CMD %s\n" % ups :
            raise PyNUTError( result.replace( "\n", "" ) )

        ups_cmds   = {}
        result     = self.__srv_handler.read_until( "END LIST CMD %s\n" % ups )
        offset     = len( "CMD %s " % ups )
        end_offset = 0 - ( len( "END LIST CMD %s\n" % ups ) + 1 )

        for current in result[:end_offset].split( "\n" ) :
            var = current[ offset: ].split( '"' )[0].replace( " ", "" )

            # For each var we try to get the available description
            try :
                self.__srv_handler.write( "GET CMDDESC %s %s\n" % ( ups, var ) )
                temp = self.__srv_handler.read_until( "\n" )
                if temp[:7] != "CMDDESC" :
                    raise PyNUTError
                else :
                    off  = len( "CMDDESC %s %s " % ( ups, var ) )
                    desc = temp[off:-1].split('"')[1]
            except :
                # Fall back to the command name when no description is available.
                desc = var

            ups_cmds[ var ] = desc

        return( ups_cmds )

    def GetRWVars( self,  ups="" ) :
        """ Get a list of all writable vars from the selected UPS

        The result is presented as a dictionary containing 'key->val' pairs
        """
        if self.__debug :
            print( "[DEBUG] GetUPSVars from '%s'..." % ups )

        self.__srv_handler.write( "LIST RW %s\n" % ups )
        result = self.__srv_handler.read_until( "\n" )
        if ( result != "BEGIN LIST RW %s\n" % ups ) :
            raise PyNUTError( result.replace( "\n",  "" ) )

        result     = self.__srv_handler.read_until( "END LIST RW %s\n" % ups )
        offset     = len( "VAR %s" % ups )
        end_offset = 0 - ( len( "END LIST RW %s\n" % ups ) + 1 )
        rw_vars    = {}

        try :
            for current in result[:end_offset].split( "\n" ) :
                var  = current[ offset: ].split( '"' )[0].replace( " ", "" )
                data = current[ offset: ].split( '"' )[1]
                rw_vars[ var ] = data
        except :
            pass

        return( rw_vars )

    def SetRWVar( self, ups="", var="", value="" ):
        """ Set a variable to the specified value on selected UPS

        The variable must be a writable value (cf GetRWVars) and you must have the proper
        rights to set it (maybe login/password).
        """
        self.__srv_handler.write( "SET VAR %s %s %s\n" % ( ups, var, value ) )
        result = self.__srv_handler.read_until( "\n" )
        if ( result == "OK\n" ) :
            return( "OK" )
        else :
            raise PyNUTError( result )

    def RunUPSCommand( self, ups="", command="" ) :
        """ Send a command to the specified UPS

        Returns OK on success or raises an error
        """
        if self.__debug :
            print( "[DEBUG] RunUPSCommand called..." )

        self.__srv_handler.write( "INSTCMD %s %s\n" % ( ups, command ) )
        result = self.__srv_handler.read_until( "\n" )
        if ( result == "OK\n" ) :
            return( "OK" )
        else :
            raise PyNUTError( result.replace( "\n", "" ) )

    def FSD( self, ups="") :
        """ Send FSD command

        Returns OK on success or raises an error
        """
        if self.__debug :
            print( "[DEBUG] MASTER called..." )

        self.__srv_handler.write( "MASTER %s\n" % ups )
        result = self.__srv_handler.read_until( "\n" )
        if ( result != "OK MASTER-GRANTED\n" ) :
            raise PyNUTError( ( "Master level function are not available", "" ) )

        if self.__debug :
            print( "[DEBUG] FSD called..." )
        self.__srv_handler.write( "FSD %s\n" % ups )
        result = self.__srv_handler.read_until( "\n" )
        if ( result == "OK FSD-SET\n" ) :
            return( "OK" )
        else :
            raise PyNUTError( result.replace( "\n", "" ) )

    def help(self) :
        """ Send HELP command
        """
        if self.__debug :
            print( "[DEBUG] HELP called..." )

        self.__srv_handler.write( "HELP\n")
        return self.__srv_handler.read_until( "\n" )

    def ver(self) :
        """ Send VER command
        """
        if self.__debug :
            print( "[DEBUG] VER called..." )

        self.__srv_handler.write( "VER\n")
        return self.__srv_handler.read_until( "\n" )

    def ListClients( self, ups = None ) :
        """ Returns the list of connected clients from the NUT server

        The result is a dictionary containing 'key->val' pairs of 'UPSName' and a list of clients
        """
        if self.__debug :
            print( "[DEBUG] ListClients from server" )

        if ups and (ups not in self.GetUPSList()):
            raise PyNUTError( "%s is not a valid UPS" % ups )

        if ups:
            self.__srv_handler.write( "LIST CLIENTS %s\n" % ups)
        else:
            self.__srv_handler.write( "LIST CLIENTS\n" )
        result = self.__srv_handler.read_until( "\n" )
        if result != "BEGIN LIST CLIENTS\n" :
            raise PyNUTError( result.replace( "\n", "" ) )

        result = self.__srv_handler.read_until( "END LIST CLIENTS\n" )
        ups_list = {}

        for line in result.split( "\n" ):
            if line[:6] == "CLIENT" :
                host, ups = line[7:].split(' ')
                # Bug fix: str.replace returns a new string; the result was
                # previously discarded, leaving any spaces in place.
                ups = ups.replace(' ', '')
                if not ups in ups_list:
                    ups_list[ups] = []
                ups_list[ups].append(host)

        return( ups_list )
| {
"content_hash": "9f03e7d89fcdef8f5ec00f041679e3b3",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 106,
"avg_line_length": 34.387205387205384,
"alnum_prop": 0.5116028591011456,
"repo_name": "ab77/beastcraft-telemetry",
"id": "9a9399cff3f8e35e0113fe20782e94d558e1d3ac",
"size": "11784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyNUT/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37816"
},
{
"name": "Shell",
"bytes": "1397"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_sql_database_info
description:
- Gather info for GCP Database
- This module was called C(gcp_sql_database_facts) before Ansible 2.9. The usage has
not changed.
short_description: Gather info for GCP Database
version_added: '2.8'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a database
gcp_sql_database_info:
instance: "{{ instance.name }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
charset:
description:
- The charset value. See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html))
and Postgres' [Character Set Support](U(https://www.postgresql.org/docs/9.6/static/multibyte.html))
for more details and supported values. Postgres databases only support a value
of `UTF8` at creation time.
returned: success
type: str
collation:
description:
- The collation value. See MySQL's [Supported Character Sets and Collations](U(https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html))
and Postgres' [Collation Support](U(https://www.postgresql.org/docs/9.6/static/collation.html))
for more details and supported values. Postgres databases only support a value
of `en_US.UTF8` at creation time.
returned: success
type: str
name:
description:
- The name of the database in the Cloud SQL instance.
- This does not include the project ID or instance name.
returned: success
type: str
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Entry point: collect Cloud SQL database info and exit the module."""
    module = GcpModule(argument_spec={'instance': {'required': True, 'type': 'str'}})

    # Warn users still invoking the module under its pre-2.9 "facts" name.
    if module._name == 'gcp_sql_database_facts':
        module.deprecate(
            "The 'gcp_sql_database_facts' module has been renamed to 'gcp_sql_database_info'",
            version='2.13')

    # Default scope grants access to the SQL Admin API.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin']

    resources = fetch_list(module, collection(module))
    module.exit_json(resources=resources)
def collection(module):
    """Build the databases collection URL for the configured project/instance."""
    base = "https://www.googleapis.com/sql/v1beta4"
    return "%s/projects/%s/instances/%s/databases" % (
        base, module.params['project'], module.params['instance'])
def fetch_list(module, link):
    """Return every database item at *link* via an authenticated SQL session."""
    session = GcpSession(module, 'sql')
    return session.list(link, return_if_object, array_name='items')
def return_if_object(module, response):
    """Decode an API response as JSON, failing the module on API errors.

    Returns None for 404 (not found) and 204 (no content) responses.
    """
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return result
# Standard Ansible module entry point: run only when executed directly.
if __name__ == "__main__":
    main()
| {
"content_hash": "c0ff2ca4a60de3eac2bb1a8124a8f7f8",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 148,
"avg_line_length": 34.18333333333333,
"alnum_prop": 0.6387128230131643,
"repo_name": "thaim/ansible",
"id": "9be73fb2c4fa48159a8d3b46c90ce3656bfbe692",
"size": "6890",
"binary": false,
"copies": "3",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/google/gcp_sql_database_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
'''
browser.py: part of nidmviewer package
Functions to visualize in browser
Copyright (c) 2014-2018, Vanessa Sochat
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from nidmviewer.utils import make_tmp_folder
import webbrowser
import shutil
import os
try:
import SimpleHTTPServer
import SocketServer
except:
import http.server as SimpleHTTPServer
import socketserver as SocketServer
'''
View code in temporary browser!
html_snippet is the template code with images subbed
copy_list is a dictionary,
with {nidm:{brainmap1_file:temp1_file,..brainmapN_file:tempN_file}}
'''
def view(html_snippet,copy_list,port):
    """Stage brain maps and the rendered template in a temp dir, then serve it.

    html_snippet : template code with images substituted
    copy_list    : dict mapping real brain-map paths to temp-file names
    port         : web server port, or None to use the default
    """
    with make_tmp_folder() as tmp_dir:
        # Stage every brain map under its temporary name.
        for source_path, temp_name in copy_list.items():
            source_path = os.path.abspath(source_path.replace("file://", ""))
            shutil.copy(source_path, "%s/%s" % (tmp_dir, temp_name))
        # Write the template and serve the directory over HTTP.
        tmp_file = "%s/pycompare.html" % tmp_dir
        os.chdir(tmp_dir)
        print(os.getcwd())
        write_file(html_snippet, tmp_file)
        page = os.path.basename(tmp_file)
        if port is None:
            return run_webserver(html_page=page)
        return run_webserver(html_page=page, port=port)
# Internal view function: write the snippet to tmp_file, open it in the
# browser, and block until the user presses Enter.
def internal_view(html_snippet, tmp_file):
    url = 'file://%s' % (tmp_file)
    write_file(html_snippet, tmp_file)
    webbrowser.open_new_tab(url)
    # Bug fix: raw_input() does not exist on Python 3, which this module
    # otherwise supports (see the http.server import fallback above).
    try:
        prompt = raw_input
    except NameError:
        prompt = input
    prompt("Press Enter to finish...")
def write_file(html_snippet, tmp_file):
    """Write *html_snippet* (str or UTF-8 bytes) to *tmp_file* as text."""
    if isinstance(html_snippet, bytes):
        html_snippet = html_snippet.decode('utf-8')
    with open(tmp_file, 'w') as handle:
        handle.writelines(html_snippet)
'''Web server (for Papaya Viewer in QA report)'''
def run_webserver(port=8088, html_page="index.html"):
    """Serve the current directory over HTTP, open html_page, and block."""
    handler_cls = SimpleHTTPServer.SimpleHTTPRequestHandler
    server = SocketServer.TCPServer(("", port), handler_cls)
    print("Serving nidmviewer at port %s" % port)
    # Open the browser before serve_forever(), which blocks indefinitely.
    webbrowser.open("http://localhost:%s/%s" % (port, html_page))
    server.serve_forever()
    return server
"""Get svg html from matplotlib figures (eg, glass brain images)"""
def get_svg_html(mpl_figures):
    """Render each matplotlib figure to SVG and return the raw contents.

    Parameters
    ----------
    mpl_figures : iterable
        Matplotlib figure objects (anything with a savefig(path) method).

    Returns
    -------
    list
        One entry per figure: the list of raw bytes lines of its SVG file
        (unchanged from the original behavior).
    """
    svg_images = []
    with make_tmp_folder() as tmp_dir:
        for fig in mpl_figures:
            # Reuse one scratch path; each savefig overwrites the last
            tmp_svg = "%s/mplfig.svg" % (tmp_dir)
            fig.savefig(tmp_svg)
            # Context manager closes the handle (the original leaked it)
            with open(tmp_svg, "rb") as svg_file:
                fig_data = svg_file.readlines()
            svg_images.append(fig_data)
    return svg_images
| {
"content_hash": "0a5a762ea8fbe26472737430dc0ac88f",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 38.67307692307692,
"alnum_prop": 0.7105917454002983,
"repo_name": "vsoch/nidmviewer",
"id": "7fb6e25bcd1a67b059850fdaea25d4230180ed3e",
"size": "4022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nidmviewer/browser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "58114"
},
{
"name": "HTML",
"bytes": "15952"
},
{
"name": "JavaScript",
"bytes": "7661"
},
{
"name": "Python",
"bytes": "36437"
}
],
"symlink_target": ""
} |
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
# Each pair is (reset_sequence, active_sequence); both stay empty unless the
# terminal is confirmed below to support ANSI escapes.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
except UnicodeDecodeError:
    # Fall back to plain ASCII status glyphs
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "
# True on any non-Windows OS (short-circuit avoids calling
# sys.getwindowsversion there), or on Windows 10 build 14393+, where
# ANSI escape processing can be switched on.
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
    if os.name == 'nt':
        import ctypes
        kernel32 = ctypes.windll.kernel32
        ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
        STD_OUTPUT_HANDLE = -11
        STD_ERROR_HANDLE = -12
        # Enable ascii color control to stdout
        stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
        stdout_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
        kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        # Enable ascii color control to stderr
        stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
        stderr_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
        kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
    GREEN = ('\033[0m', '\033[0;32m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')
# Exit-status convention shared with the test framework:
# 0 = test passed, 77 = test skipped; anything else is a failure.
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# Ordered list of test scripts (entries may carry extra CLI args).
# Order matters: longest-running first, so parallel workers stay busy.
BASE_SCRIPTS = [
    # Scripts that are run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'feature_fee_estimation.py',
    'wallet_hd.py',
    'wallet_backup.py',
    # vv Tests less than 5m vv
    'mining_getblocktemplate_longpoll.py',
    'feature_maxuploadtarget.py',
    'feature_block.py',
    'rpc_fundrawtransaction.py',
    'p2p_compactblocks.py',
    'feature_segwit.py',
    # vv Tests less than 2m vv
    'wallet_basic.py',
    'wallet_labels.py',
    'p2p_segwit.py',
    'p2p_timeouts.py',
    'wallet_dump.py',
    'wallet_listtransactions.py',
    # vv Tests less than 60s vv
    'p2p_sendheaders.py',
    'wallet_zapwallettxes.py',
    'wallet_importmulti.py',
    'mempool_limit.py',
    'rpc_txoutproof.py',
    'wallet_listreceivedby.py',
    'wallet_abandonconflict.py',
    'feature_csv_activation.py',
    'rpc_rawtransaction.py',
    'wallet_address_types.py',
    'feature_bip68_sequence.py',
    'p2p_feefilter.py',
    'feature_reindex.py',
    # vv Tests less than 30s vv
    'wallet_keypool_topup.py',
    'interface_zmq.py',
    'interface_bitcoin_cli.py',
    'mempool_resurrect.py',
    'wallet_txn_doublespend.py --mineblock',
    'tool_wallet.py',
    'wallet_txn_clone.py',
    'wallet_txn_clone.py --segwit',
    'rpc_getchaintips.py',
    'rpc_misc.py',
    'interface_rest.py',
    'mempool_spend_coinbase.py',
    'mempool_reorg.py',
    'mempool_persist.py',
    'wallet_multiwallet.py',
    'wallet_multiwallet.py --usecli',
    'wallet_createwallet.py',
    'wallet_createwallet.py --usecli',
    'interface_http.py',
    'interface_rpc.py',
    'rpc_psbt.py',
    'rpc_users.py',
    'feature_proxy.py',
    'rpc_signrawtransaction.py',
    'wallet_groups.py',
    'p2p_disconnect_ban.py',
    'rpc_decodescript.py',
    'rpc_blockchain.py',
    'rpc_deprecated.py',
    'wallet_disable.py',
    'rpc_net.py',
    'wallet_keypool.py',
    'p2p_mempool.py',
    'p2p_blocksonly.py',
    'mining_prioritisetransaction.py',
    'p2p_invalid_locator.py',
    'p2p_invalid_block.py',
    'p2p_invalid_messages.py',
    'p2p_invalid_tx.py',
    'feature_assumevalid.py',
    'example_test.py',
    'wallet_txn_doublespend.py',
    'wallet_txn_clone.py --mineblock',
    'feature_notifications.py',
    'rpc_invalidateblock.py',
    'feature_rbf.py',
    'mempool_packages.py',
    'rpc_createmultisig.py',
    'feature_versionbits_warning.py',
    'rpc_preciousblock.py',
    'wallet_importprunedfunds.py',
    'p2p_leak_tx.py',
    'rpc_signmessage.py',
    'wallet_balance.py',
    'feature_nulldummy.py',
    'mempool_accept.py',
    'wallet_import_rescan.py',
    'wallet_import_with_label.py',
    'rpc_bind.py --ipv4',
    'rpc_bind.py --ipv6',
    'rpc_bind.py --nonloopback',
    'mining_basic.py',
    'wallet_bumpfee.py',
    'rpc_named_arguments.py',
    'wallet_listsinceblock.py',
    'p2p_leak.py',
    'wallet_encryption.py',
    'feature_dersig.py',
    'feature_cltv.py',
    'rpc_uptime.py',
    'wallet_resendwallettransactions.py',
    'wallet_fallbackfee.py',
    'feature_minchainwork.py',
    'rpc_getblockstats.py',
    'wallet_create_tx.py',
    'p2p_fingerprint.py',
    'feature_uacomment.py',
    'wallet_coinbase_category.py',
    'feature_filelock.py',
    'p2p_unrequested_blocks.py',
    'feature_includeconf.py',
    'rpc_deriveaddresses.py',
    'rpc_deriveaddresses.py --usecli',
    'rpc_scantxoutset.py',
    'feature_logging.py',
    'p2p_node_network_limited.py',
    'feature_blocksdir.py',
    'feature_config_args.py',
    'rpc_help.py',
    'feature_help.py',
    'feature_shutdown.py',
    # Don't append tests at the end to avoid merge conflicts
    # Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
    # These tests are not run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'feature_pruning.py',
    'feature_dbcrash.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]
def main():
    """Parse CLI options, assemble the test list, and hand off to run_tests()."""
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
    args, unknown_args = parser.parse_known_args()
    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile, encoding="utf8"))
    passon_args.append("--configfile=%s" % configfile)
    # Set up logging: --quiet suppresses per-test DEBUG chatter
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)
    # Create base test directory (non-ASCII chars exercise path handling)
    tmpdir = "%s/test_runner_₿_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)
    logging.debug("Temporary test directory at %s" % tmpdir)
    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
    if not enable_bitcoind:
        print("No functional tests to run.")
        print("Rerun ./configure with --with-daemon and then make")
        sys.exit(0)
    # Build list of tests
    test_list = []
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [test + ".py" if ".py" not in test else test for test in tests]
        for test in tests:
            if test in ALL_SCRIPTS:
                test_list.append(test)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
    elif args.extended:
        # Include extended tests
        test_list += ALL_SCRIPTS
    else:
        # Run base tests only
        test_list += BASE_SCRIPTS
    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
        for exclude_test in exclude_tests:
            # Remove <test_name>.py and <test_name>.py --arg from the test list
            exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
            for exclude_item in exclude_list:
                test_list.remove(exclude_item)
            if not exclude_list:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)
    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
        sys.exit(0)
    # Sanity checks: every on-disk script is listed, and names follow convention
    check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
    check_script_prefixes()
    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
    run_tests(
        test_list=test_list,
        src_dir=config["environment"]["SRCDIR"],
        build_dir=config["environment"]["BUILDDIR"],
        tmpdir=tmpdir,
        jobs=args.jobs,
        enable_coverage=args.coverage,
        args=passon_args,
        combined_logs_len=args.combinedlogslen,
        failfast=args.failfast,
        runs_ci=args.ci,
    )
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci):
    """Run every script in *test_list* (up to *jobs* in parallel), print a
    summary, and sys.exit(0/1) depending on overall success."""
    args = args or []
    # Warn if bitcoind is already running (unix only)
    try:
        # NOTE(review): the process checked is "myriadcoind" but the warning
        # text says "bitcoind" — the message looks stale from the upstream
        # Bitcoin fork; confirm which binary name is intended.
        if subprocess.check_output(["pidof", "myriadcoind"]) is not None:
            print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        pass
    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
    tests_dir = src_dir + '/test/functional/'
    flags = ['--cachedir={}'.format(cache_dir)] + args
    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None
    if len(test_list) > 1 and jobs > 1:
        # Populate cache once up front so parallel tests don't race to build it
        try:
            subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise
    #Run Tests
    job_queue = TestHandler(
        num_tests_parallel=jobs,
        tests_dir=tests_dir,
        tmpdir=tmpdir,
        test_list=test_list,
        flags=flags,
        timeout_duration=40 * 60 if runs_ci else float('inf'), # in seconds
    )
    start_time = time.time()
    test_results = []
    max_len_name = len(max(test_list, key=len))
    test_count = len(test_list)
    for i in range(test_count):
        # Blocks until some job finishes
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)
        done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
        if test_result.status == "Passed":
            logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("%s skipped" % (done_str))
        else:
            print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
                if BOLD[0]:
                    combined_logs_args += ['--color']
                combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
            if failfast:
                logging.debug("Early exiting after test failure")
                break
    print_results(test_results, max_len_name, (int(time.time() - start_time)))
    if coverage:
        coverage.report_rpc_coverage()
        logging.debug("Cleaning up coverage data")
        coverage.cleanup()
    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)
    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
    # This will be a no-op unless failfast is True in which case there may be dangling
    # processes which need to be killed.
    job_queue.kill_and_join()
    sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Print a formatted table of every test's outcome plus an overall summary."""
    # Build the output in a list of fragments and join once at the end.
    pieces = [
        "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
    ]
    test_results.sort(key=TestResult.sort_key)
    everything_passed = True
    total_duration = 0
    for result in test_results:
        everything_passed = everything_passed and result.was_successful
        total_duration += result.time
        result.padding = max_len_name
        pieces.append(str(result))
    overall = (TICK + "Passed") if everything_passed else (CROSS + "Failed")
    if not everything_passed:
        pieces.append(RED[1])
    pieces.append(BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), overall.ljust(9), total_duration) + BOLD[0])
    if not everything_passed:
        pieces.append(RED[0])
    pieces.append("Runtime: %s s\n" % (runtime))
    print("".join(pieces))
class TestHandler:
    """
    Trigger the test scripts passed in via the list.
    """
    def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
        assert num_tests_parallel >= 1
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.timeout_duration = timeout_duration
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        # Each entry: (name, start_time, Popen, testdir, stdout_file, stderr_file)
        self.jobs = []
    def get_next(self):
        """Top up the worker pool, then block until one job finishes.

        Returns (TestResult, testdir, stdout_text, stderr_text)."""
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            test = self.test_list.pop(0)
            # Remaining-list length gives each test a unique port seed so
            # parallel nodes don't collide on ports.
            portseed = len(self.test_list)
            portseed_arg = ["--portseed={}".format(portseed)]
            # Spooled files keep small logs in memory, spilling to disk past 64 KiB
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = test.split()
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
            tmpdir_arg = ["--tmpdir={}".format(testdir)]
            self.jobs.append((test,
                              time.time(),
                              subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              testdir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        dot_count = 0
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for job in self.jobs:
                (name, start_time, proc, testdir, log_out, log_err) = job
                # NOTE(review): once over the timeout, SIGINT is re-sent on
                # every 0.5 s poll until the process actually exits.
                if int(time.time() - start_time) > self.timeout_duration:
                    # In travis, timeout individual tests (to stop tests hanging and not providing useful output).
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # Any stderr output fails the test even with exit code 0
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(job)
                    # Erase the progress dots before the caller prints results
                    clearline = '\r' + (' ' * dot_count) + '\r'
                    print(clearline, end='', flush=True)
                    dot_count = 0
                    return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
            print('.', end='', flush=True)
            dot_count += 1
    def kill_and_join(self):
        """Send SIGKILL to all jobs and block until all have ended."""
        procs = [i[2] for i in self.jobs]
        for proc in procs:
            proc.kill()
        for proc in procs:
            proc.wait()
class TestResult():
    """Outcome of a single test script: name, status string, and duration."""
    def __init__(self, name, status, time):
        self.name = name
        self.status = status
        self.time = time
        # Column width for __repr__; set by print_results before formatting.
        self.padding = 0
    def sort_key(self):
        """Sort passed first, then skipped, then failed; alphabetical within each group."""
        for rank, status in enumerate(("Passed", "Skipped", "Failed")):
            if self.status == status:
                return rank, self.name.lower()
    def __repr__(self):
        # (color pair, glyph) for each known status
        styling = {
            "Passed": (GREEN, TICK),
            "Failed": (RED, CROSS),
            "Skipped": (GREY, CIRCLE),
        }
        color, glyph = styling[self.status]
        return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
    @property
    def was_successful(self):
        """A skipped test still counts as successful; only 'Failed' does not."""
        return self.status != "Failed"
def check_script_prefixes():
    """Check that test scripts start with one of the allowed name prefixes."""
    allowed_prefix = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
    offenders = [script for script in ALL_SCRIPTS if not allowed_prefix.match(script)]
    if offenders:
        print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(offenders)))
        print(" %s" % ("\n ".join(sorted(offenders))))
        raise AssertionError("Some tests are not following naming convention!")
def check_script_list(*, src_dir, fail_on_warn):
    """Check scripts directory.

    Check that there are no scripts in the functional tests directory which are
    not being run by pull-tester.py."""
    script_dir = src_dir + '/test/functional/'
    # All .py files actually on disk
    on_disk = {name for name in os.listdir(script_dir) if name.endswith(".py")}
    # Script names registered in the test lists (strip any CLI args)
    registered = {entry.split()[0] for entry in ALL_SCRIPTS + NON_SCRIPTS}
    missed_tests = list(on_disk - registered)
    if missed_tests:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if fail_on_warn:
            # On travis this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)
class RPCCoverage():
    """
    Coverage reporting utilities for test_runner.

    Each test script subprocess writes `coverage.*` files into a shared
    directory, listing the RPC commands it invoked; `rpc_interface.txt`
    in the same directory holds the complete command list. After the run,
    the difference between the two is the set of uncovered commands.

    See also: test/functional/test_framework/coverage.py
    """
    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        # Flag forwarded to every test script so they log into self.dir
        self.flag = '--coveragedir=%s' % self.dir
    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if not uncovered:
            print("All RPC commands covered.")
        else:
            print("Uncovered RPC commands:")
            print("".join((" - %s\n" % command) for command in sorted(uncovered)))
    def cleanup(self):
        """Remove the coverage directory and everything in it."""
        return shutil.rmtree(self.dir)
    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test-framework/coverage.py`
        reference_path = os.path.join(self.dir, 'rpc_interface.txt')
        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")
        with open(reference_path, 'r', encoding="utf8") as reference_file:
            all_cmds = {line.strip() for line in reference_file}
        covered_cmds = set()
        for root, _, files in os.walk(self.dir):
            for filename in files:
                if not filename.startswith('coverage.'):
                    continue
                with open(os.path.join(root, filename), 'r', encoding="utf8") as coverage_file:
                    covered_cmds.update(line.strip() for line in coverage_file)
        return all_cmds - covered_cmds
# Entry point: only run the suite when executed directly, not when imported.
if __name__ == '__main__':
    main()
| {
"content_hash": "486e2d3f20c51f9876699eb358ee8ad9",
"timestamp": "",
"source": "github",
"line_count": 645,
"max_line_length": 205,
"avg_line_length": 38.87286821705426,
"alnum_prop": 0.6079448011805528,
"repo_name": "myriadcoin/myriadcoin",
"id": "980f4656073ab7487978bf471c273f9acc5684a0",
"size": "25298",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/test_runner.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "1590918"
},
{
"name": "C++",
"bytes": "6467954"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "201405"
},
{
"name": "Makefile",
"bytes": "121719"
},
{
"name": "Objective-C",
"bytes": "6345"
},
{
"name": "Objective-C++",
"bytes": "5378"
},
{
"name": "Python",
"bytes": "1611450"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "92134"
}
],
"symlink_target": ""
} |
"""
A module for our virtual or dynamic adapters
"""
from zope.interface import Interface
__author__ = 'dimd'
class IVirtualAdapter(Interface):
    """
    A base class marker for our dynamic or virtual adapters.

    Declares no methods or attributes — presumably components provide it
    purely so they can be identified/looked up by interface; confirm
    against the adapter registration code.
    """
| {
"content_hash": "0ef9bb5a737497e7db42a398c891af92",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 59,
"avg_line_length": 20.181818181818183,
"alnum_prop": 0.6936936936936937,
"repo_name": "dimddev/NetCatKS",
"id": "485a392df9df54946a2d54b6810827b32bcc5328",
"size": "222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NetCatKS/Components/api/interfaces/virtual/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "182697"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the boolean ``is_available``
    # field (default False) to the ``outboundcontactphoneinfo`` model.
    dependencies = [
        ('crm', '0011_auto_20161106_1320'),
    ]
    operations = [
        migrations.AddField(
            model_name='outboundcontactphoneinfo',
            name='is_available',
            field=models.BooleanField(default=False),
        ),
    ]
| {
"content_hash": "8a24f038f92d4d2eb8c390edbcbe4557",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 22.166666666666668,
"alnum_prop": 0.6115288220551378,
"repo_name": "bpatyi/simpleCRM",
"id": "f7ce0961fd20de47559814e7b8ed2f5faea8c5df",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crm/migrations/0012_outboundcontactphoneinfo_is_available.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "279044"
},
{
"name": "HTML",
"bytes": "58463"
},
{
"name": "JavaScript",
"bytes": "41408"
},
{
"name": "Python",
"bytes": "129803"
}
],
"symlink_target": ""
} |
import wx
from odmtools.controller.frmAddPoints import AddPoints
from odmtools.controller.olvAddPoint import Points
__author__ = 'Jacob'
class TestAddPoints:
    """GUI tests for the AddPoints dialog and its ObjectListView (``self.olv``)."""
    def setup(self):
        # Fresh wx app + dialog per test; the list view must start empty.
        self.app = wx.App()
        self.frame = AddPoints(None)
        self.olv = self.frame.olv
        assert self.olv
        assert len(self.olv.GetObjects()) == 0
    # NOTE(review): camelCase ``tearDown`` is the unittest.TestCase spelling,
    # but this class is not a TestCase — plain pytest would look for
    # ``teardown``. Confirm the runner in use (nose accepts both); otherwise
    # the frame is never destroyed between tests.
    def tearDown(self):
        self.frame.Destroy()
    def test_onAddBtn(self):
        # Counts track objects added one at a time and via SetObjects.
        self.olv.AddObject(self.olv.sampleRow())
        assert len(self.olv.GetObjects()) == 1
        self.olv.AddObject(self.olv.sampleRow())
        assert len(self.olv.GetObjects()) == 2
        assert not len(self.olv.GetObjects()) == 3
        self.olv.SetObjects(None)
        assert not self.olv.GetObjects()
        size = 99999
        objects = self._buildObjects(size)
        self.olv.SetObjects(objects)
        assert len(self.olv.GetObjects()) == size
        '''
        Note:
        If you need this event to
        be processed synchronously use
        (self.GetEventhandler().ProcessEvent(event)) to fire the event instead
        that way it will get handled before the next part of your code is
        executed.
        So for example
        def FireEvent(self):
        evt = MyEvent(...)
        self.GetEventHandler().ProcessEvent(evt)
        self.updateDrawing()
        '''
        evt = wx.PyCommandEvent(wx.EVT_BUTTON.typeId, self.frame.addRowBtn.GetId())
        #wx.PostEvent(self.frame.addRowBtn, evt)
        self.frame.GetEventHandler().ProcessEvent(evt)
        assert self.olv.GetObjects()
    def test_onDeleteBtn(self):
        # Bulk add then bulk remove; list must be empty afterwards.
        size = 10000
        values = [self.olv.sampleRow() for _ in range(size)]
        assert values and len(values) == size
        self.olv.AddObjects(values)
        assert len(self.olv.GetObjects()) == size
        selectedObjs = self.olv.GetObjects()
        assert selectedObjs
        if len(selectedObjs) > 1:
            length = len(selectedObjs)
            self.olv.RemoveObjects(selectedObjs)
            assert len(self.olv.GetObjects()) == 0
        else:
            assert False
        # Single-object round trip
        self.olv.AddObject(self.olv.sampleRow())
        selectedObjs = self.olv.GetObjects()
        if len(selectedObjs) > 1:
            assert False
        self.olv.RemoveObjects(selectedObjs)
        assert not self.olv.GetObjects()
        self.olv.AddObjects(self._buildObjects(size))
        assert len(self.olv.GetObjects()) == size
        # Fire the delete-button event synchronously
        evt = wx.PyCommandEvent(wx.EVT_BUTTON.typeId, self.frame.deleteRowBtn.GetId())
        self.frame.GetEventHandler().ProcessEvent(evt)
    def test_onClearAllBtn(self):
        # _clearAll must drop every row, even a very large set.
        assert not self.olv.GetObjects()
        size = 100000
        self.olv.SetObjects(self._buildObjects(size))
        assert len(self.olv.GetObjects()) == size
        self.frame._clearAll()
        assert not self.olv.GetObjects()
    def test_customRemove(self):
        self.olv.AddObject(self.olv.sampleRow())
        assert len(self.olv.GetObjects()) == 1
        objects = self.olv.GetObjects()
        self.frame.customRemove(objects)
        assert not self.olv.GetObjects()
        ## Test order remains the same after removing
        size = 10000
        objects = self._buildObjects(size)
        # for i in objects:
        #     print i.dataValue
        assert len(objects) == size
        tests = [1, 5, 25, 100, 150, 300, 600, 55, 9000]
        for test in objects:
            if test.dataValue in tests:
                if not self.frame.customRemove(test):
                    assert False
        currentObjects = self.olv.GetObjects()
        for obj in currentObjects:
            # NOTE(review): this compares an int dataValue against a list of
            # Points objects — it can only hold if Points defines __eq__
            # against ints; confirm the intended membership check.
            if obj.dataValue not in objects:
                assert False
    def test_isCorrect(self):
        # Validate rows through olv.isCorrect: 499 good rows, then one bad.
        import pandas as pd
        self.col = ['DataValue', 'Date', 'Time', 'UTCOffSet', 'CensorCode', 'ValueAccuracy', 'OffSetValue',
                    'OffSetType', 'QualifierCode', 'LabSampleCode']
        df = pd.DataFrame(columns=self.col)
        df.loc[0] = ['FLOAT|INT', 'YYYY-MM-DD', 'HH:MM:SS', 'INT', 'gt|nc|lt|nd|pnq', 'FLOAT', 'FLOAT',
                     'String', 'String', 'String']
        size = 500
        pointList = []
        for i in range(1, size):
            df.loc[i] = ['-9999', '2005-06-29', '14:20:15', '-7', 'nc', "1.2", "1", "NULL", "NULL", "NULL"]
            pointList.append(Points(*df.loc[i]))
        assert len(df) == size
        assert len(pointList) == size - 1
        isCorrect = True
        for p in pointList:
            returnValue = self.olv.isCorrect(p)
            if returnValue == "error":
                isCorrect = False
        assert isCorrect
        """Bad case"""
        # Malformed time ('--:20:15') and censor code must fail validation.
        df.loc[len(df)+1] = ['-9999', '2005-06-29', '--:20:15', '-7', 'BadExample', "1.2", "1", "NULL", "NULL", "NULL"]
        pointList.append(Points(*df.append(df.loc[len(df)])))
        assert len(df) == size + 1
        assert len(pointList) == size
        isCorrect = True
        for p in pointList:
            if not self.olv.isCorrect(p):
                isCorrect = False
        assert not isCorrect
    def _buildObjects(self, size):
        # Helper: Points objects with dataValue 0..size-1, in insertion order.
        return [Points(dataValue=x) for x in range(size)]
    def test_onUploadBtn(self):
        pass
    def test_onFinishedBtn(self):
        pass
| {
"content_hash": "fcc924cc39672bb0ef507d2709a4c436",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 119,
"avg_line_length": 30.935672514619885,
"alnum_prop": 0.57882797731569,
"repo_name": "ODM2/ODMToolsPython",
"id": "777efdbea21b6669f31d1046a21d502514085ff7",
"size": "5290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_controller/test_frmAddPoints.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2742"
},
{
"name": "Inno Setup",
"bytes": "10064"
},
{
"name": "PLpgSQL",
"bytes": "50590441"
},
{
"name": "PowerShell",
"bytes": "7130"
},
{
"name": "Python",
"bytes": "1516957"
},
{
"name": "Shell",
"bytes": "5544"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.urls import reverse
from django.utils.timezone import now
from le_utils.constants import content_kinds
from rest_framework.test import APITestCase
from kolibri.core.auth.models import Classroom
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.models import LearnerGroup
from kolibri.core.auth.test.helpers import provision_device
from kolibri.core.exams.models import Exam
from kolibri.core.exams.models import ExamAssignment
from kolibri.core.lessons.models import Lesson
from kolibri.core.lessons.models import LessonAssignment
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import MasteryLog
class LearnerClassroomTestCase(APITestCase):
    def setUp(self):
        """Provision the device and create a facility with a coach, a learner,
        and one classroom that the learner is a member of."""
        provision_device()
        self.facility = Facility.objects.create(name="My Facility")
        self.coach_user = FacilityUser.objects.create(
            username="admin", facility=self.facility
        )
        self.coach_user.set_password("password")
        self.coach_user.save()
        self.learner_user = FacilityUser.objects.create(
            username="learner", facility=self.facility
        )
        self.learner_user.set_password("password")
        self.learner_user.save()
        # URL route basename for the learner-classroom API endpoints
        self.basename = "kolibri:kolibri.plugins.learn:learnerclassroom"
        self.own_classroom = Classroom.objects.create(
            name="Own Classroom", parent=self.facility
        )
        self.own_classroom.add_member(self.learner_user)
    def test_must_be_authenticated(self):
        """Anonymous requests to the classroom list endpoint are rejected (403)."""
        get_response = self.client.get(reverse(self.basename + "-list"))
        self.assertEqual(get_response.status_code, 403)
    def test_learner_only_sees_own_classrooms(self):
        """The list endpoint returns only classrooms the learner belongs to."""
        self.client.login(username="learner", password="password")
        # A classroom the learner is NOT a member of must not appear
        Classroom.objects.create(name="Other Classroom", parent=self.facility)
        get_response = self.client.get(reverse(self.basename + "-list"))
        self.assertEqual(len(get_response.data), 1)
        self.assertEqual(get_response.data[0]["id"], self.own_classroom.id)
    def test_correct_number_of_exams(self):
        """Only active exams assigned to the learner's group are returned."""
        # One active and inactive exam
        exam_1 = Exam.objects.create(
            title="Exam",
            collection=self.own_classroom,
            question_count=10,
            creator=self.coach_user,
            active=True,
        )
        exam_2 = Exam.objects.create(
            title="Inactive Exam",
            collection=self.own_classroom,
            question_count=10,
            creator=self.coach_user,
            active=False,
        )
        lgroup = LearnerGroup.objects.create(
            name="Learner Group", parent=self.own_classroom
        )
        lgroup.add_learner(self.learner_user)
        # Assign both exams; only the active one should be visible
        ExamAssignment.objects.create(
            exam=exam_1, collection=lgroup, assigned_by=self.coach_user
        )
        ExamAssignment.objects.create(
            exam=exam_2, collection=lgroup, assigned_by=self.coach_user
        )
        self.client.login(username="learner", password="password")
        get_response = self.client.get(
            reverse(self.basename + "-detail", kwargs={"pk": self.own_classroom.id})
        )
        self.assertEqual(len(get_response.data["assignments"]["exams"]), 1)
def test_correct_number_of_attempted_exams(self):
    """Inactive exams stay visible only when the learner has attempted them."""
    active_exam = Exam.objects.create(
        title="Exam",
        collection=self.own_classroom,
        question_count=10,
        creator=self.coach_user,
        active=True,
    )
    inactive_exam = Exam.objects.create(
        title="Inactive Exam",
        collection=self.own_classroom,
        question_count=10,
        creator=self.coach_user,
        active=False,
    )
    attempted_exam = Exam.objects.create(
        title="Inactive Attempted Exam",
        collection=self.own_classroom,
        question_count=10,
        creator=self.coach_user,
        active=False,
    )
    group = LearnerGroup.objects.create(
        name="Learner Group", parent=self.own_classroom
    )
    group.add_learner(self.learner_user)
    for exam in (active_exam, inactive_exam, attempted_exam):
        ExamAssignment.objects.create(
            exam=exam, collection=group, assigned_by=self.coach_user
        )
    # Record an attempt against one inactive exam so it remains listed.
    summarylog = ContentSummaryLog.objects.create(
        user=self.learner_user,
        content_id=attempted_exam.id,
        kind=content_kinds.QUIZ,
        progress=0.0,
        start_timestamp=now(),
    )
    MasteryLog.objects.create(
        user=self.learner_user,
        summarylog=summarylog,
        start_timestamp=now(),
        mastery_level=1,
    )
    self.client.login(username="learner", password="password")
    response = self.client.get(
        reverse(self.basename + "-detail", kwargs={"pk": self.own_classroom.id})
    )
    # active exam + attempted inactive exam = 2
    self.assertEqual(len(response.data["assignments"]["exams"]), 2)
def test_correct_number_of_lessons(self):
    """Only the active assigned lesson appears in the classroom detail."""
    active_lesson = Lesson.objects.create(
        title="Lesson",
        collection=self.own_classroom,
        created_by=self.coach_user,
        is_active=True,
    )
    inactive_lesson = Lesson.objects.create(
        title="Inactive Lesson",
        collection=self.own_classroom,
        created_by=self.coach_user,
        is_active=False,
    )
    group = LearnerGroup.objects.create(
        name="Learner Group", parent=self.own_classroom
    )
    group.add_learner(self.learner_user)
    # Assign both lessons to the learner's group.
    for lesson in (active_lesson, inactive_lesson):
        LessonAssignment.objects.create(
            lesson=lesson, collection=group, assigned_by=self.coach_user
        )
    self.client.login(username="learner", password="password")
    response = self.client.get(
        reverse(self.basename + "-detail", kwargs={"pk": self.own_classroom.id})
    )
    self.assertEqual(len(response.data["assignments"]["lessons"]), 1)
def test_learner_only_sees_lessons_for_enrolled_classroom(self):
    """Lessons assigned in a classroom the learner is not enrolled in stay hidden."""
    other_classroom = Classroom.objects.create(
        name="Other Classroom", parent=self.facility
    )
    other_lesson = Lesson.objects.create(
        title="Lesson",
        collection=other_classroom,
        created_by=self.coach_user,
        is_active=True,
    )
    LessonAssignment.objects.create(
        lesson=other_lesson, collection=other_classroom, assigned_by=self.coach_user
    )
    self.client.login(username="learner", password="password")
    response = self.client.get(reverse(self.basename + "-list"))
    self.assertEqual(len(response.data[0]["assignments"]["lessons"]), 0)
def test_learner_only_sees_lessons_for_single_classroom_when_enrolled_in_multiple(
    self,
):
    """Each classroom entry lists only its own lessons, never a sibling's."""
    second_classroom = Classroom.objects.create(
        name="Other Classroom", parent=self.facility
    )
    second_classroom.add_member(self.learner_user)
    own_lesson = Lesson.objects.create(
        title="Lesson",
        collection=self.own_classroom,
        created_by=self.coach_user,
        is_active=True,
    )
    LessonAssignment.objects.create(
        lesson=own_lesson, collection=self.own_classroom, assigned_by=self.coach_user
    )
    self.client.login(username="learner", password="password")
    response = self.client.get(reverse(self.basename + "-list"))
    # The single lesson must be attributed to exactly one of the two entries.
    total_lessons = len(response.data[0]["assignments"]["lessons"]) + len(
        response.data[1]["assignments"]["lessons"]
    )
    self.assertEqual(total_lessons, Lesson.objects.count())
| {
"content_hash": "02d4e908a001bca79cd7acd1377ba4b2",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 86,
"avg_line_length": 39.352380952380955,
"alnum_prop": 0.6246369796708615,
"repo_name": "learningequality/kolibri",
"id": "449e914f82294b5e613dfe81b542d48eb39524bf",
"size": "8264",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/plugins/learn/test/test_learner_classroom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3095586"
},
{
"name": "Dockerfile",
"bytes": "3559"
},
{
"name": "Gherkin",
"bytes": "996801"
},
{
"name": "HTML",
"bytes": "22573"
},
{
"name": "JavaScript",
"bytes": "2233801"
},
{
"name": "Makefile",
"bytes": "12972"
},
{
"name": "Python",
"bytes": "3652744"
},
{
"name": "SCSS",
"bytes": "8551"
},
{
"name": "Shell",
"bytes": "3867"
},
{
"name": "Vue",
"bytes": "2193917"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("/home/ppershing/python/numberjack/local_lib");
sys.setrecursionlimit(4000)
import Numberjack
import time
import pystp
import os
from datetime import datetime
def readfile(filename):
    """Parse a problem-instance file.

    Layout: current best cost, N, an NxN float cost matrix (one row per
    line), then two blocks of fixed edges, each a count F followed by
    F lines of "a b cost".

    Returns (cur_cost, N, cost_matrix, fixed) where fixed is a pair of
    dicts mapping (larger_vertex, smaller_vertex) -> fixed cost.
    """
    # `with` guarantees the handle is closed; the original leaked it.
    with open(filename, "r") as f:
        cur_cost = float(f.readline().strip())
        N = int(f.readline().strip())
        cost_matrix = []
        for _ in range(N):
            row = f.readline().strip().split()
            cost_matrix.append([float(row[j]) for j in range(N)])
        fixed = [{}, {}]
        for _f in range(2):
            F = int(f.readline().strip())
            for _ in range(F):
                tokens = f.readline().strip().split()
                a, b = sorted([int(tokens[0]), int(tokens[1])])
                # keys are (larger, smaller) to match lookups elsewhere
                fixed[_f][(b, a)] = float(tokens[2])
    return cur_cost, N, cost_matrix, fixed
def save_solution(filename, solution):
    """Write the result file.

    Writes the literal line "was_optimal" when `solution` is None (the
    incumbent could not be improved); otherwise writes "new_solution"
    followed by the vertices of both tours, one vertex per line.
    """
    f = open(filename, "w")
    if solution is None:
        print >>f, "was_optimal"
    else:
        print >>f, "new_solution"
        # solution is a pair of tours; dump both vertex sequences in order
        for x in range(2):
            for i in solution[x]:
                print >>f, i
    f.close()
def preprocess_fixed(fixed):
    """Zero out all fixed-edge costs, folding them into a constant offset.

    Every fixed edge gets cost 0.0; the difference between the two sets'
    totals (sum1 - sum0) is pushed onto one arbitrary edge of the second
    set so the combined objective is preserved up to the returned offset.

    Args:
        fixed: pair of dicts mapping edge keys to costs; mutated in place.

    Returns:
        (fixed, sum0): the mutated structure and the constant to add back
        to any objective computed from the zeroed costs.

    Raises:
        ValueError: when a non-zero delta exists but the second set has no
        edge to carry it (the original crashed with an IndexError here).
    """
    sum0 = sum(fixed[0].values())
    sum1 = sum(fixed[1].values())
    delta = sum1 - sum0
    for key in fixed[0]:
        fixed[0][key] = 0.0
    for key in fixed[1]:
        fixed[1][key] = 0.0
    if fixed[1]:
        # next(iter(...)) works on Python 2 and 3; dict.keys()[0] did not.
        fixed[1][next(iter(fixed[1]))] = delta
    elif delta != 0:
        raise ValueError("no fixed edge available to carry the cost delta")
    return fixed, sum0
def get_subtours(solution):
    """Decompose a 2-factor into its cycles.

    Args:
        solution: lower-triangular 0/1 adjacency matrix (solution[i][j]
            for j < i) in which every vertex has degree exactly 2.

    Returns:
        List of cycles, each a list of vertex indices.
    """
    n = len(solution)  # was a module-level global `N` in the original

    # Sanity check: every vertex must have degree exactly 2.
    for i in range(n):
        degree = 0
        for j in range(i):
            degree += solution[i][j]
        for j in range(i + 1, n):
            degree += solution[j][i]
        assert degree == 2

    def get_next(i, prev):
        # Follow the edge out of i that does not lead straight back to prev.
        for j in range(i):
            if solution[i][j] and j != prev:
                return j
        for j in range(i + 1, n):
            if solution[j][i] and j != prev:
                return j
        assert False

    used = []
    tours = []
    for i in range(n):
        if i not in used:
            subtour = [i]
            previous = -1
            used.append(i)
            j = get_next(i, previous)
            while j not in used:
                subtour.append(j)
                used.append(j)
                previous = i
                i = j
                j = get_next(i, previous)
            tours.append(subtour)
    # Every vertex must land in exactly one tour.
    flattened = [x for tour in tours for x in tour]
    assert len(flattened) == len(solution)
    return tours
def vertex_min_edges(i, cost_matrix, fixed, cnt):
    """Return up to `cnt` cheapest edges incident to vertex i.

    All fixed edges incident to i (taken from every dict in `fixed`, at
    most three sets) come first, then the cheapest free edges by cost.

    Returns:
        List of (cost, (i, j)) tuples, length at most `cnt`.
    """
    n = len(cost_matrix)  # was a module-level global `N` in the original
    free_edges = []
    fixed_edges = []
    for j in range(n):
        fixed_hits = 0
        small, large = sorted([i, j])
        key = (large, small)  # fixed-edge keys are (larger, smaller)
        for fixed_set in fixed[:3]:
            if key in fixed_set:
                fixed_edges.append((fixed_set[key], (i, j)))
                fixed_hits += 1
        # an edge can be fixed in at most two of the sets
        assert fixed_hits <= 2
        if fixed_hits < 2 and i != j:
            free_edges.append((cost_matrix[i][j], (i, j)))
    free_edges.sort()
    candidates = fixed_edges + free_edges
    return candidates[:cnt]  # fixed + best free edges
def get_min_edges(cost_matrix, fixed, cnt):
    """Sum the `cnt` cheapest incident edges over all vertices.

    Returns:
        (cost, edges) where edges[i] is the per-vertex list from
        vertex_min_edges and cost is the total over all of them (an
        undirected edge can be counted from both endpoints).
    """
    n = len(cost_matrix)  # was a module-level global `N` in the original
    edges = [vertex_min_edges(i, cost_matrix, fixed, cnt) for i in range(n)]
    cost = sum(edge_cost for per_vertex in edges for edge_cost, _ in per_vertex)
    return (cost, edges)
def find_irrelevant(cost_matrix, fixed, upper_bound):
    """Find edges that can never appear in a solution within the upper bound.

    For each non-fixed edge (i, j): pretend it is fixed (as a third fixed
    set), recompute the degree-based lower bound contribution of its two
    endpoints, and mark the edge irrelevant when the adjusted lower bound
    exceeds the scaled upper bound.  Returns a dict keyed by both (i, j)
    and (j, i).
    """
    lower_bound, edges = get_min_edges(cost_matrix, fixed, 4)
    # UB cost = max(sum path)
    # LB cost <= sum(path * 2)
    # so adjust UB to match our definition
    upper_bound = upper_bound * 4
    print "min4lb", lower_bound
    print "upperb", upper_bound
    print "delta:", upper_bound - lower_bound
    irrelevant = {}
    for i in range(N):
        for j in range(i):
            if (i,j) in fixed[0] and (i,j) in fixed[1]:
                continue # fixed edge, cannot play with it anyway
            # lower-bound contribution of vertices i and j as currently chosen
            old = edges[i] + edges[j]
            # third fixed set forces edge (i, j) into the selection
            tmp = [ fixed[0],
                    fixed[1],
                    {(i,j):cost_matrix[i][j]},
                  ]
            new = vertex_min_edges(i, cost_matrix, tmp, 4)
            new += vertex_min_edges(j, cost_matrix, tmp, 4)
            oldc = sum([cost for cost,desc in old])
            newc = sum([cost for cost,desc in new])
            #print lower_bound, newc, oldc, lower_bound + newc - oldc, upper_bound
            # 0.001 epsilon guards against float rounding
            if (lower_bound + newc - oldc - 0.001 > upper_bound):
                irrelevant[(i,j)] = True
                irrelevant[(j,i)] = True
    return irrelevant
def get_4factor(N, cost_matrix, fixed, irrelevant, upper_bound, cost_delta):
    """Solve the relaxed '4-factor' ILP with SCIP (via Numberjack).

    Builds two degree-2 edge-selection subproblems over lower-triangular
    0/1 variables, minimizes the averaged cost, and returns their two
    incidence matrices.  Returns (None, None) when the lower bound already
    meets `upper_bound` (the incumbent is optimal).  `cost_delta` is the
    constant offset from preprocess_fixed, used only for reporting.
    """
    model = Numberjack.Model()
    selected = [None, None]
    cost = [None, None]
    for x in range(2):
        # variable: whether this edge lies on the (oriented) path
        selected[x] = []
        for i in range(N):
            selected[x].append([ Numberjack.Variable(0, 1) for j in range(i) ])
        # fixed edges must be selected
        for i,j in fixed[x]:
            model.add(selected[x][i][j] == 1)
        # edges pruned by find_irrelevant can never be selected
        for i,j in irrelevant:
            if i>j and (i,j) not in fixed[x] and (j,i) not in fixed[x]:
                model.add(selected[x][i][j] == 0)
        # fix degree
        for i in range(0, N):
            tmp = [ selected[x][i][j] for j in range(i) ]
            tmp += [ selected[x][j][i] for j in range(i+1, N) ]
            model.add(Numberjack.Sum(tmp) == 2)
        # cost
        cost[x] = Numberjack.Variable(-1e20, 1e20)
        a = []
        b = []
        for i in range(N):
            for j in range(i):
                a.append(selected[x][i][j])
                # fixed edges carry their (zeroed) fixed cost, not matrix cost
                b.append(fixed[x][(i,j)] if (i,j) in fixed[x] else cost_matrix[i][j])
        model.add(cost[x] == Numberjack.Sum(a,b))
    # objective = average of the two subproblem costs
    objective = Numberjack.Variable(-1e20, 1e20)
    model.add(objective == Numberjack.Sum(cost, [0.5, 0.5]))
    model.add(Numberjack.Minimize(objective))
    # exclusivity: a free edge may appear in at most one of the two factors
    for i in range(N):
        for j in range(i):
            if (i,j) in fixed[0] or (i,j) in fixed[1]:
                continue
            model.add(selected[0][i][j] + selected[1][i][j] <= 1)
    solver = model.load('SCIP')
    solver.setVerbosity(1)
    res = solver.solve()
    assert res == True
    lower_bound = objective.get_value()
    print ">>>> lower bound", lower_bound + cost_delta
    print ">>>> upper bound", upper_bound + cost_delta
    print ">>>> gap: %.2f " % (upper_bound - lower_bound)
    if lower_bound + 0.01 >= upper_bound:
        print ">>>> was already optimal"
        # sanity: bounds should not have crossed by more than the epsilon
        assert lower_bound - 0.01 < upper_bound
        return None, None
    factor41 = [ [ selected[0][i][j].get_value() for j in range(i) ] for i in range(N) ]
    factor42 = [ [ selected[1][i][j].get_value() for j in range(i) ] for i in range(N) ]
    solver.delete()
    return factor41, factor42
def solve2factor_using_subtour_elim(N, fixed, factor4):
    """Extract two Hamiltonian 2-factors from a 4-factor via STP (SAT).

    Encodes edge selection as 2-bit STP bitvectors restricted to the edges
    of `factor4` (plus the fixed edges), and iteratively adds subtour
    elimination constraints until both selections form a single cycle.
    Returns the pair of tours.  `factor4` is mutated (fixed edges are
    removed from it).
    """
    # now we have 4factor, let us use STP
    stp = pystp.Stp()
    stp.setFlags("o")
    two_bit = stp.createType(pystp.TYPE_BITVECTOR, 2)
    # constant value
    def Const(value, bits=2):
        return stp.bvConstExprFromLL(bits, value)
    # unsigned less
    def Less(a, b):
        return stp.bvLeExpr(a, b)
    # equality
    def Equal(a, b):
        return stp.eqExpr(a, b)
    selected = [None, None]
    print "creating variables"
    for x in range(2):
        # variable: whether this edge lies on the (oriented) path
        selected[x] = []
        for i in range(N):
            selected[x].append([ stp.varExpr("selected_%d_%d_%d" % (x,i,j), two_bit) for j in range(i) ])
    print "assert 0-1 range"
    for x in range(2):
        for i in range(N):
            for j in range(i):
                # bvLeExpr: value <= 1, i.e. the 2-bit var acts as a boolean
                expr = Less(selected[x][i][j], Const(1))
                stp.assertFormula(expr)
    print "assert 1 for all fixed edges and remove them from 4-factor"
    for x in range(2):
        for i in range(N):
            for j in range(i):
                if (i,j) in fixed[x]:
                    expr = Equal(selected[x][i][j], Const(1))
                    stp.assertFormula(expr)
                    factor4[i][j] = factor4[i][j] - 1 # remove fixed edge from 4-factor
    print "assert 0 for all edges not in 4-factor"
    for x in range(2):
        for i in range(N):
            for j in range(i):
                if (i,j) not in fixed[x] and factor4[i][j] == 0:
                    formula = Equal(selected[x][i][j], Const(0))
                    stp.assertFormula(formula)
    print "assert 2-factors"
    for x in range(2):
        for i in range(N):
            # accumulate the vertex degree in a wider vector to avoid overflow
            expr = Const(0, 32) # used 32bit variables here
            for j in range(i):
                expr = stp.bvPlusExpr(32, expr, stp.bvConcatExpr(Const(0,30), selected[x][i][j]))
            for j in range(i+1, N):
                expr = stp.bvPlusExpr(32, expr, stp.bvConcatExpr(Const(0, 30), selected[x][j][i]))
            stp.assertFormula(Equal(expr, Const(2, 32)))
    print "assert exclusivity"
    for i in range(N):
        for j in range(i):
            if (i,j) in fixed[0] or (i,j) in fixed[1]:
                continue
            # a free edge must not be selected in both factors at once
            tmp = stp.bvAndExpr(selected[0][i][j], selected[1][i][j])
            expr = Equal(tmp, Const(1))
            stp.assertFormula(stp.notExpr(expr))
    print "basic constrains ok, solving"
    subtours = []
    subtour_add = []
    while True:
        print "eliminating", len(subtours), "subtours"
        for x in range(2):
            for subtour in subtour_add:
                assert len(subtour) > 2
                # primary condition
                # at least one edge must leave the subtour's vertex set
                tmp = []
                for i in subtour:
                    for j in range(i):
                        if j not in subtour:
                            tmp.append(selected[x][i][j])
                    for j in range(i+1, N):
                        if j not in subtour:
                            tmp.append(selected[x][j][i])
                assert tmp # wtf tmp is empty?
                expr = Const(0)
                for t in tmp:
                    expr = stp.bvOrExpr(expr, t)
                expr = Equal(expr, Const(1)) # or is nonzero
                stp.assertFormula(expr)
        print "querying stp"
        stp.push()
        # query(false) == False means the assertions are satisfiable
        res = stp.query(stp.falseExpr())
        print res
        assert res == False
        print "done"
        solution0 = [ [ stp.getCounterExample(selected[0][i][j]).getBVUnsignedLongLong() for j in range(i) ] for i in range(N) ]
        solution1 = [ [ stp.getCounterExample(selected[1][i][j]).getBVUnsignedLongLong() for j in range(i) ] for i in range(N) ]
        stp.pop()
        tmp0 = get_subtours(solution0)
        tmp1 = get_subtours(solution1)
        if len(tmp0) == 1 and len(tmp1) == 1:
            # both selections are single Hamiltonian cycles -- done
            print "have solution!"
            return (tmp0[0], tmp1[0])
        # otherwise collect the offending subtours and forbid them next round
        subtour_add = []
        if len(tmp0) != 1:
            subtour_add += tmp0
        if len(tmp1) != 1:
            subtour_add += tmp1
        for xxx in subtour_add:
            assert xxx not in subtours
        subtours += subtour_add
        # print (solution0)
        # print subtours
# ---------------------------------------------------------------------------
# Main script: read the instance, prune provably useless edges, solve the
# relaxed 4-factor ILP, and hand the result to an external binary that
# reconstructs the two paths.
# ---------------------------------------------------------------------------
infile = sys.argv[1]
outfile = sys.argv[2]
# NOTE(review): this local name shadows the stdlib `tempfile` module.
tempfile = outfile + ".tmp"
upper_bound, N, cost_matrix,fixed = readfile(infile)
#print N, cost_matrix, fixed
# fold the fixed-edge costs into a constant offset (delta)
fixed, delta = preprocess_fixed(fixed)
upper_bound -= delta
# prune edges that provably cannot improve on the upper bound
irrelevant = find_irrelevant(cost_matrix, fixed, upper_bound)
print "irrelevant edges", len(irrelevant)
#print sorted(irrelevant.keys())
starttime = datetime.now()
factor41, factor42 = get_4factor(N, cost_matrix, fixed, irrelevant, upper_bound, delta)
factor4time = datetime.now()
solution = None
if not factor41:
    # lower bound met the upper bound -- the incumbent was already optimal
    save_solution(outfile, None)
    sys.exit(0)
#if factor4:
#    solution = solve2factor_using_subtour_elim(N, fixed, factor4)
endtime = datetime.now()
# timing report
print N
print starttime
print factor4time
print endtime
print factor4time - starttime
print endtime - factor4time
# dump both 4-factor incidence matrices as edge lists for the external tool
ones = 0
f = open(tempfile, "w")
for i in xrange(0, len(factor41)):
    for j in xrange(0, len(factor41[i])):
        for k in xrange(0, factor41[i][j]):
            print >>f, "%d %d" % (i, j)
        ones += factor41[i][j]
for i in xrange(0, len(factor42)):
    for j in xrange(0, len(factor42[i])):
        for k in xrange(0, factor42[i][j]):
            print >>f, "%d %d" % (i, j)
        ones += factor42[i][j]
print ones
print N
f.close()
retval = os.system("./4factor-to-2paths.bin %s %s %s" % (infile, tempfile, outfile))
print retval
#save_solution(outfile, solution)
| {
"content_hash": "80851e947e2b1b527b7a2850323611bb",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 122,
"avg_line_length": 26.765903307888042,
"alnum_prop": 0.6259150109325982,
"repo_name": "usamec/travelling-santa",
"id": "b3b682c17ebb958ef4cda5bccf1b16897b7f03db",
"size": "10537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ilp/usama.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58238"
},
{
"name": "C++",
"bytes": "196028"
},
{
"name": "Python",
"bytes": "38613"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
} |
"""Implementation of data processing ops.
All ops should return data processing functors. Data examples are represented
as a dictionary of tensors.
Most of these were originally implemented by: Lucas Beyer, Alex Kolesnikov,
Xiaohua Zhai and other collaborators from Google Brain Zurich.
"""
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
try:
from cloud_tpu.models.efficientnet import autoaugment # pylint: disable=g-import-not-at-top
except ImportError:
autoaugment = None
class InKeyOutKey(object):
  """Decorator for preprocessing ops, which adds `inkey` and `outkey` arguments.

  Note: Only supports single-input single-output ops.
  """

  def __init__(self, indefault='image', outdefault='image'):
    self.indefault = indefault
    self.outdefault = outdefault

  def __call__(self, orig_get_pp_fn):
    def get_ikok_pp_fn(*args, key=None,
                       inkey=self.indefault, outkey=self.outdefault, **kw):
      pp_fn = orig_get_pp_fn(*args, **kw)
      # `key` overrides both sides when given.
      src_key = key or inkey
      dst_key = key or outkey

      def _ikok_pp_fn(data):
        data[dst_key] = pp_fn(data[src_key])
        return data

      return _ikok_pp_fn

    return get_ikok_pp_fn
@InKeyOutKey()
def central_crop(crop_size):
  """Makes central crop of a given size.

  Args:
    crop_size: either an integer H, where H is both the height and width of the
      central crop, or a list or tuple [H, W] of integers, where H and W are
      height and width of the central crop respectively.

  Returns:
    A function, that applies central crop.
  """
  if isinstance(crop_size, int):
    crop_size = (crop_size, crop_size)
  crop_hw = tuple(crop_size)

  def _crop(image):
    target_h, target_w = crop_hw[0], crop_hw[1]
    offset_y = (tf.shape(image)[0] - target_h) // 2
    offset_x = (tf.shape(image)[1] - target_w) // 2
    return tf.image.crop_to_bounding_box(
        image, offset_y, offset_x, target_h, target_w)

  return _crop
def copy(inkey, outkey):
  """Copies value of `inkey` into `outkey` (mutates and returns `data`)."""

  def _copy(data):
    value = data[inkey]
    data[outkey] = value
    return data

  return _copy
@InKeyOutKey()
def decode(channels=3):
  """Decodes an encoded image string, see tf.io.decode_image.

  Args:
    channels: number of image channels to decode to (default: 3).

  Returns:
    A function that decodes an encoded-image string tensor.
  """
  def _decode(image):
    # expand_animations=False keeps the result a single (H, W, C) frame.
    return tf.io.decode_image(image, channels=channels, expand_animations=False)
  return _decode
@InKeyOutKey()
def decode_jpeg_and_inception_crop(resize_size=None, area_min=5, area_max=100):
  """Decodes jpeg string and makes inception-style image crop.

  See `inception_crop` for details.

  Args:
    resize_size: Resize image to this size after crop.
    area_min: minimal crop area (percent of the image).
    area_max: maximal crop area (percent of the image).

  Returns:
    A function, that applies inception crop.
  """
  def _inception_crop(image_data):  # pylint: disable=missing-docstring
    # Work from the encoded bytes so only the crop window is ever decoded.
    shape = tf.image.extract_jpeg_shape(image_data)
    begin, size, _ = tf.image.sample_distorted_bounding_box(
        shape,
        tf.zeros([0, 0, 4], tf.float32),
        area_range=(area_min / 100, area_max / 100),
        min_object_covered=0,  # Don't enforce a minimum area.
        use_image_if_no_bounding_boxes=True)

    # Crop the image to the specified bounding box.
    offset_y, offset_x, _ = tf.unstack(begin)
    target_height, target_width, _ = tf.unstack(size)
    crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
    image = tf.image.decode_and_crop_jpeg(image_data, crop_window, channels=3)
    if resize_size:
      image = resize(resize_size)({'image': image})['image']

    return image

  return _inception_crop
@InKeyOutKey()
def flip_lr():
  """Flips an image horizontally with probability 50%.

  Returns:
    A function that randomly mirrors an image left-right.
  """
  def _random_flip_lr_pp(image):
    return tf.image.random_flip_left_right(image)
  return _random_flip_lr_pp
@InKeyOutKey()
def inception_crop(resize_size=None, area_min=5, area_max=100,
                   resize_method='bilinear'):
  """Makes inception-style image crop.

  Inception-style crop is a random image crop (its size and aspect ratio are
  random) that was used for training Inception models, see
  https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf.

  Args:
    resize_size: Resize image to this size after crop.
    area_min: minimal crop area (percent of the image).
    area_max: maximal crop area (percent of the image).
    resize_method: resize method, see tf.image.resize docs for options.

  Returns:
    A function, that applies inception crop.
  """
  def _inception_crop(image):  # pylint: disable=missing-docstring
    begin, size, _ = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        tf.zeros([0, 0, 4], tf.float32),
        area_range=(area_min / 100, area_max / 100),
        min_object_covered=0,  # Don't enforce a minimum area.
        use_image_if_no_bounding_boxes=True)
    crop = tf.slice(image, begin, size)
    # Unfortunately, the above operation loses the depth-dimension. So we need
    # to restore it the manual way.
    crop.set_shape([None, None, image.shape[-1]])
    if resize_size:
      crop = resize(resize_size, resize_method)({'image': crop})['image']
    return crop

  return _inception_crop
def keep(*keys):
  """Keeps only the given keys."""
  wanted = set(keys)

  def _keep(data):
    return {key: value for key, value in data.items() if key in wanted}

  return _keep
@InKeyOutKey(indefault='labels', outdefault='labels')
def onehot(depth, multi=True, on=1.0, off=0.0):
  """One-hot encodes the input.

  Args:
    depth: Length of the one-hot vector (how many classes).
    multi: If there are multiple labels, whether to merge them into the same
      "multi-hot" vector (True) or keep them as an extra dimension (False).
    on: Value to fill in for the positive label (default: 1).
    off: Value to fill in for negative labels (default: 0).

  Returns:
    Data dictionary.
  """
  def _onehot(label):
    # When there's more than one label, this is significantly more efficient
    # than using tf.one_hot followed by tf.reduce_max; we tested.
    if label.shape.rank > 0 and multi:
      # Scatter ones at the label indices, then rescale 0/1 to off/on.
      x = tf.scatter_nd(label[:, None],
                        tf.ones(tf.shape(label)[0]), (depth,))
      # clip handles duplicate labels, which would otherwise sum above 1
      x = tf.clip_by_value(x, 0, 1) * (on - off) + off
    else:
      x = tf.one_hot(label, depth, on_value=on, off_value=off)
    return x
  return _onehot
@InKeyOutKey()
def randaug(num_layers: int = 2, magnitude: int = 10):
  """Creates a function that applies RandAugment.

  RandAugment is from the paper https://arxiv.org/abs/1909.13719,

  Args:
    num_layers: Integer, the number of augmentation transformations to apply
      sequentially to an image. Represented as (N) in the paper. Usually best
      values will be in the range [1, 3].
    magnitude: Integer, shared magnitude across all augmentation operations.
      Represented as (M) in the paper. Usually best values are in the range
      [5, 30].

  Returns:
    a function that applies RandAugment.

  Raises:
    ValueError: if the optional `cloud_tpu` autoaugment module could not be
      imported at module load time.
  """
  if autoaugment is None:
    raise ValueError(
        "In order to use RandAugment you need to install the 'cloud_tpu' "
        "package. Clone the https://github.com/tensorflow/tpu repository, "
        "name it 'cloud_tpu', and add the corresponding directory to your "
        "PYTHONPATH.")
  def _randaug(image):
    return autoaugment.distort_image_with_randaugment(
        image, num_layers, magnitude)
  return _randaug
@InKeyOutKey()
def resize(resize_size, resize_method='bilinear'):
  """Resizes image to a given size.

  Args:
    resize_size: either an integer H, where H is both the new height and width
      of the resized image, or a list or tuple [H, W] of integers, where H and W
      are new image"s height and width respectively.
    resize_method: resize method, see tf.image.resize docs for options.

  Returns:
    A function for resizing an image.
  """
  if isinstance(resize_size, int):
    resize_size = (resize_size, resize_size)
  resize_size = tuple(resize_size)
  def _resize(image):
    # Note: use TF-2 version of tf.image.resize as the version in TF-1 is
    # buggy: https://github.com/tensorflow/tensorflow/issues/6720.
    # In particular it was not equivariant with rotation and lead to the network
    # to learn a shortcut in self-supervised rotation task, if rotation was
    # applied after resize.
    dtype = image.dtype
    image = tf2.image.resize(image, resize_size, resize_method)
    # tf2.image.resize returns float; cast back to the input dtype.
    return tf.cast(image, dtype)
  return _resize
@InKeyOutKey()
def resize_small(smaller_size):
  """Resizes the smaller side to `smaller_size` keeping aspect ratio.

  Args:
    smaller_size: an integer, that represents a new size of the smaller side of
      an input image.

  Returns:
    A function, that resizes an image and preserves its aspect ratio.
  """
  def _resize_small(image):  # pylint: disable=missing-docstring
    h, w = tf.shape(image)[0], tf.shape(image)[1]

    # Figure out the necessary h/w.
    ratio = (
        tf.cast(smaller_size, tf.float32) /
        tf.cast(tf.minimum(h, w), tf.float32))
    h = tf.cast(tf.round(tf.cast(h, tf.float32) * ratio), tf.int32)
    w = tf.cast(tf.round(tf.cast(w, tf.float32) * ratio), tf.int32)

    # NOTE(review): tf.image.resize_area is a TF1-era op; presumably callers
    # rely on its output dtype/behavior — confirm before modernizing.
    return tf.image.resize_area(image[None], [h, w])[0]
  return _resize_small
@InKeyOutKey()
def value_range(vmin, vmax, in_min=0, in_max=255.0, clip_values=False):
  """Transforms a [in_min,in_max] image to [vmin,vmax] range.

  Input ranges in_min/in_max can be equal-size lists to rescale the individual
  channels independently.

  Args:
    vmin: A scalar. Output max value.
    vmax: A scalar. Output min value.
    in_min: A scalar or a list of input min values to scale. If a list, the
      length should match to the number of channels in the image.
    in_max: A scalar or a list of input max values to scale. If a list, the
      length should match to the number of channels in the image.
    clip_values: Whether to clip the output values to the provided ranges.

  Returns:
    A function to rescale the values.
  """
  def _value_range(image):
    """Scales values in given range."""
    in_min_t = tf.constant(in_min, tf.float32)
    in_max_t = tf.constant(in_max, tf.float32)
    image = tf.cast(image, tf.float32)
    # Normalize to [0, 1], then map onto [vmin, vmax].
    image = (image - in_min_t) / (in_max_t - in_min_t)
    image = vmin + image * (vmax - vmin)
    if clip_values:
      image = tf.clip_by_value(image, vmin, vmax)
    return image
  return _value_range
| {
"content_hash": "61f4dcab6e0912fdc8ad31aea0fccb22",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 94,
"avg_line_length": 30.884848484848487,
"alnum_prop": 0.6704277864992151,
"repo_name": "google-research/vmoe",
"id": "f530bded9d23d061485b0db2df86c595f662ce66",
"size": "10768",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "vmoe/data/pp_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8574"
},
{
"name": "Python",
"bytes": "540835"
},
{
"name": "Shell",
"bytes": "2077"
}
],
"symlink_target": ""
} |
import operator
import threading
from mapproxy.grid import bbox_intersects, bbox_contains
from mapproxy.util.py import cached_property
from mapproxy.util.geom import (
require_geom_support,
load_polygon_lines,
transform_geometry,
bbox_polygon,
EmptyGeometryError,
)
from mapproxy.srs import SRS
import logging
from functools import reduce
log_config = logging.getLogger('mapproxy.config.coverage')
try:
import shapely.geometry
import shapely.prepared
except ImportError:
# missing Shapely is handled by require_geom_support
pass
def coverage(geom, srs, clip=False):
    """Create a coverage: a BBOXCoverage for bbox lists/tuples, otherwise a
    GeomCoverage for geometry objects."""
    if isinstance(geom, (list, tuple)):
        return BBOXCoverage(geom, srs, clip=clip)
    return GeomCoverage(geom, srs, clip=clip)
def load_limited_to(limited_to):
    """Build a clipping GeomCoverage from a `limited_to` configuration dict.

    `limited_to['geometry']` may be a Shapely geometry (detected via its
    `type` attribute), a bbox list/tuple, or a newline-separated string of
    polygon lines; `limited_to['srs']` names the SRS the geometry is in.
    """
    require_geom_support()
    srs = SRS(limited_to['srs'])
    geom = limited_to['geometry']
    if not hasattr(geom, 'type'):  # not a Shapely geometry
        if isinstance(geom, (list, tuple)):
            # bbox -> polygon
            geom = bbox_polygon(geom)
        else:
            # newline-separated polygon definition
            polygons = load_polygon_lines(geom.split('\n'))
            if len(polygons) == 1:
                geom = polygons[0]
            else:
                geom = shapely.geometry.MultiPolygon(polygons)
    return GeomCoverage(geom, srs, clip=True)
class MultiCoverage(object):
    """Aggregates multiple coverages."""

    clip = False

    def __init__(self, coverages):
        self.coverages = coverages
        self.bbox = self.extent.bbox

    @cached_property
    def extent(self):
        # combined extent of all child coverages
        return reduce(operator.add, [c.extent for c in self.coverages])

    def intersects(self, bbox, srs):
        return any(c.intersects(bbox, srs) for c in self.coverages)

    def contains(self, bbox, srs):
        return any(c.contains(bbox, srs) for c in self.coverages)

    def transform_to(self, srs):
        return MultiCoverage([c.transform_to(srs) for c in self.coverages])

    def __eq__(self, other):
        if not isinstance(other, MultiCoverage):
            return NotImplemented
        if self.bbox != other.bbox:
            return False
        if len(self.coverages) != len(other.coverages):
            return False
        return all(a == b for a, b in zip(self.coverages, other.coverages))

    def __ne__(self, other):
        if not isinstance(other, MultiCoverage):
            return NotImplemented
        return not self.__eq__(other)

    def __repr__(self):
        return '<MultiCoverage %r: %r>' % (self.extent.llbbox, self.coverages)
class BBOXCoverage(object):
    """Coverage defined by an axis-aligned bounding box in a fixed SRS."""

    def __init__(self, bbox, srs, clip=False):
        self.bbox = bbox
        self.srs = srs
        self.geom = None
        self.clip = clip

    @property
    def extent(self):
        from mapproxy.layer import MapExtent

        return MapExtent(self.bbox, self.srs)

    def _bbox_in_coverage_srs(self, bbox, srs):
        # reproject only when the query SRS differs from ours
        if srs != self.srs:
            bbox = srs.transform_bbox_to(self.srs, bbox)
        return bbox

    def intersects(self, bbox, srs):
        return bbox_intersects(self.bbox, self._bbox_in_coverage_srs(bbox, srs))

    def intersection(self, bbox, srs):
        """Return the overlapping BBOXCoverage, or None if disjoint."""
        other = self._bbox_in_coverage_srs(bbox, srs)
        minx = max(self.bbox[0], other[0])
        miny = max(self.bbox[1], other[1])
        maxx = min(self.bbox[2], other[2])
        maxy = min(self.bbox[3], other[3])
        if minx >= maxx or miny >= maxy:
            return None
        return BBOXCoverage((minx, miny, maxx, maxy), self.srs, clip=self.clip)

    def contains(self, bbox, srs):
        return bbox_contains(self.bbox, self._bbox_in_coverage_srs(bbox, srs))

    def transform_to(self, srs):
        if srs == self.srs:
            return self
        transformed = self.srs.transform_bbox_to(srs, self.bbox)
        return BBOXCoverage(transformed, srs, clip=self.clip)

    def __eq__(self, other):
        if not isinstance(other, BBOXCoverage):
            return NotImplemented
        return self.srs == other.srs and self.bbox == other.bbox

    def __ne__(self, other):
        if not isinstance(other, BBOXCoverage):
            return NotImplemented
        return not self.__eq__(other)

    def __repr__(self):
        return '<BBOXCoverage %r/%r>' % (self.extent.llbbox, self.bbox)
class GeomCoverage(object):
    """Coverage defined by an arbitrary Shapely geometry in a fixed SRS."""

    def __init__(self, geom, srs, clip=False):
        self.geom = geom
        self.bbox = geom.bounds
        self.srs = srs
        self.clip = clip
        # prepared-geometry cache; the lock serializes (re)creation
        self._prep_lock = threading.Lock()
        self._prepared_geom = None
        self._prepared_counter = 0
        self._prepared_max = 10000

    @property
    def extent(self):
        from mapproxy.layer import MapExtent
        return MapExtent(self.bbox, self.srs)

    @property
    def prepared_geom(self):
        # GEOS internal data structure for prepared geometries grows over time,
        # recreate to limit memory consumption
        if not self._prepared_geom or self._prepared_counter > self._prepared_max:
            self._prepared_geom = shapely.prepared.prep(self.geom)
            self._prepared_counter = 0
        self._prepared_counter += 1
        return self._prepared_geom

    def _geom_in_coverage_srs(self, geom, srs):
        """Normalize `geom` (Shapely geometry, (x, y) point tuple, or bbox)
        into this coverage's SRS as a Shapely geometry."""
        if isinstance(geom, shapely.geometry.base.BaseGeometry):
            if srs != self.srs:
                geom = transform_geometry(srs, self.srs, geom)
        elif len(geom) == 2:
            # (x, y) point tuple
            if srs != self.srs:
                geom = srs.transform_to(self.srs, geom)
            geom = shapely.geometry.Point(geom)
        else:
            # bbox tuple
            if srs != self.srs:
                geom = srs.transform_bbox_to(self.srs, geom)
            geom = bbox_polygon(geom)
        return geom

    def transform_to(self, srs):
        if srs == self.srs:
            return self
        geom = transform_geometry(self.srs, srs, self.geom)
        return GeomCoverage(geom, srs, clip=self.clip)

    def intersects(self, bbox, srs):
        bbox = self._geom_in_coverage_srs(bbox, srs)
        # prepared-geometry access is guarded by the lock (see prepared_geom)
        with self._prep_lock:
            return self.prepared_geom.intersects(bbox)

    def intersection(self, bbox, srs):
        bbox = self._geom_in_coverage_srs(bbox, srs)
        return GeomCoverage(self.geom.intersection(bbox), self.srs, clip=self.clip)

    def contains(self, bbox, srs):
        bbox = self._geom_in_coverage_srs(bbox, srs)
        with self._prep_lock:
            return self.prepared_geom.contains(bbox)

    def __eq__(self, other):
        if not isinstance(other, GeomCoverage):
            return NotImplemented
        if self.srs != other.srs:
            return False
        # cheap bbox comparison first, exact geometry comparison second
        if self.bbox != other.bbox:
            return False
        if not self.geom.equals(other.geom):
            return False
        return True

    def __ne__(self, other):
        if not isinstance(other, GeomCoverage):
            return NotImplemented
        return not self.__eq__(other)

    def __repr__(self):
        return '<GeomCoverage %r: %r>' % (self.extent.llbbox, self.geom)
def union_coverage(coverages, clip=None):
    """
    Create a coverage that is the union of all `coverages`.

    Resulting coverage is in the SRS of the first coverage.
    """
    srs = coverages[0].srs
    coverages = [c.transform_to(srs) for c in coverages]
    geoms = []
    for c in coverages:
        # BBOX coverages carry no geometry; convert their bbox to a polygon
        if isinstance(c, BBOXCoverage):
            geoms.append(bbox_polygon(c.bbox))
        else:
            geoms.append(c.geom)
    import shapely.ops
    # NOTE(review): cascaded_union is deprecated in newer Shapely releases in
    # favour of unary_union — confirm the supported Shapely version.
    union = shapely.ops.cascaded_union(geoms)
    return GeomCoverage(union, srs=srs, clip=clip)
def diff_coverage(coverages, clip=None):
    """
    Create a coverage by subtracting all `coverages` from the first one.

    Resulting coverage is in the SRS of the first coverage.

    :raises EmptyGeometryError: if nothing of the first coverage remains.
    """
    srs = coverages[0].srs
    coverages = [c.transform_to(srs) for c in coverages]
    geoms = []
    for c in coverages:
        # BBOX coverages carry no geometry; convert their bbox to a polygon
        if isinstance(c, BBOXCoverage):
            geoms.append(bbox_polygon(c.bbox))
        else:
            geoms.append(c.geom)
    # The module only imports shapely.geometry/shapely.prepared at the top;
    # import shapely.ops explicitly (as union_coverage does) so that
    # cascaded_union is guaranteed to resolve.
    import shapely.ops
    sub = shapely.ops.cascaded_union(geoms[1:])
    diff = geoms[0].difference(sub)
    if diff.is_empty:
        raise EmptyGeometryError("diff did not return any geometry")
    return GeomCoverage(diff, srs=srs, clip=clip)
def intersection_coverage(coverages, clip=None):
    """
    Create a coverage by creating the intersection of all `coverages`.
    Resulting coverage is in the SRS of the first coverage.

    :raises EmptyGeometryError: if the intersection leaves no geometry
    """
    srs = coverages[0].srs
    coverages = [c.transform_to(srs) for c in coverages]
    geoms = []
    for c in coverages:
        # BBOX coverages carry no geometry; build a polygon from their bbox
        if isinstance(c, BBOXCoverage):
            geoms.append(bbox_polygon(c.bbox))
        else:
            geoms.append(c.geom)
    # explicit pairwise fold; the builtin `reduce` used before is not
    # available on Python 3 without importing it from functools
    intersection = geoms[0]
    for geom in geoms[1:]:
        intersection = intersection.intersection(geom)
    if intersection.is_empty:
        raise EmptyGeometryError("intersection did not return any geometry")
    return GeomCoverage(intersection, srs=srs, clip=clip)
"content_hash": "bf5519f536993cf7139f831ba9d2a483",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 84,
"avg_line_length": 29.43225806451613,
"alnum_prop": 0.6083954405962297,
"repo_name": "olt/mapproxy",
"id": "4479d2abf2e259e3b1a20b6c89717800fba2b2d1",
"size": "9769",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mapproxy/util/coverage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12401"
},
{
"name": "HTML",
"bytes": "18782"
},
{
"name": "Makefile",
"bytes": "1045"
},
{
"name": "Python",
"bytes": "1744780"
}
],
"symlink_target": ""
} |
"""
This script will generate two headers that describe all of the clang cross compiled
functions.
The script outputs (run: 'be/src/codegen/gen_ir_descriptions.py')
- be/src/generated-sources/palo-ir/palo-ir-functions.h
This file contains enums for all of the cross compiled functions
- be/src/generated-sources/palo-ir/palo-ir-function-names.h
This file contains a mapping of <string, enum>
Mapping of enum to compiled function name. The compiled function name only has to
be a substring of the actual, mangled compiler generated name.
TODO: should we work out the mangling rules?
"""
import string
import os
# Registry of cross-compiled IR functions as (enum designator, mangled-name
# substring) pairs. The substring only needs to occur somewhere inside the
# compiler-generated mangled symbol name (see module docstring).
# Commented-out entries are kept for reference; they are not emitted.
ir_functions = [
    ["AGG_NODE_PROCESS_ROW_BATCH_WITH_GROUPING", "process_row_batch_with_grouping"],
    ["AGG_NODE_PROCESS_ROW_BATCH_NO_GROUPING", "process_row_batch_no_grouping"],
    # ["EXPR_GET_VALUE", "IrExprGetValue"],
    # ["HASH_CRC", "IrCrcHash"],
    # ["HASH_FVN", "IrFvnHash"],
    ["HASH_JOIN_PROCESS_BUILD_BATCH", "12HashJoinNode19process_build_batch"],
    ["HASH_JOIN_PROCESS_PROBE_BATCH", "12HashJoinNode19process_probe_batch"],
    ["EXPR_GET_BOOLEAN_VAL", "4Expr15get_boolean_val"],
    ["EXPR_GET_TINYINT_VAL", "4Expr16get_tiny_int_val"],
    ["EXPR_GET_SMALLINT_VAL", "4Expr17get_small_int_val"],
    ["EXPR_GET_INT_VAL", "4Expr11get_int_val"],
    ["EXPR_GET_BIGINT_VAL", "4Expr15get_big_int_val"],
    ["EXPR_GET_LARGEINT_VAL", "4Expr17get_large_int_val"],
    ["EXPR_GET_FLOAT_VAL", "4Expr13get_float_val"],
    ["EXPR_GET_DOUBLE_VAL", "4Expr14get_double_val"],
    ["EXPR_GET_STRING_VAL", "4Expr14get_string_val"],
    ["EXPR_GET_DATETIME_VAL", "4Expr16get_datetime_val"],
    ["EXPR_GET_DECIMAL_VAL", "4Expr15get_decimal_val"],
    ["HASH_CRC", "ir_crc_hash"],
    ["HASH_FNV", "ir_fnv_hash"],
    ["FROM_DECIMAL_VAL", "16from_decimal_val"],
    ["TO_DECIMAL_VAL", "14to_decimal_val"],
    ["FROM_DATETIME_VAL", "17from_datetime_val"],
    ["TO_DATETIME_VAL", "15to_datetime_val"],
    ["IR_STRING_COMPARE", "ir_string_compare"],
    # ["STRING_VALUE_EQ", "StringValueEQ"],
    # ["STRING_VALUE_NE", "StringValueNE"],
    # ["STRING_VALUE_GE", "StringValueGE"],
    # ["STRING_VALUE_GT", "StringValueGT"],
    # ["STRING_VALUE_LT", "StringValueLT"],
    # ["STRING_VALUE_LE", "StringValueLE"],
    # ["STRING_TO_BOOL", "IrStringToBool"],
    # ["STRING_TO_INT8", "IrStringToInt8"],
    # ["STRING_TO_INT16", "IrStringToInt16"],
    # ["STRING_TO_INT32", "IrStringToInt32"],
    # ["STRING_TO_INT64", "IrStringToInt64"],
    # ["STRING_TO_FLOAT", "IrStringToFloat"],
    # ["STRING_TO_DOUBLE", "IrStringToDouble"],
    # ["STRING_IS_NULL", "IrIsNullString"],
    ["HLL_UPDATE_BOOLEAN", "hll_updateIN8palo_udf10BooleanVal"],
    ["HLL_UPDATE_TINYINT", "hll_updateIN8palo_udf10TinyIntVal"],
    ["HLL_UPDATE_SMALLINT", "hll_updateIN8palo_udf11SmallIntVal"],
    ["HLL_UPDATE_INT", "hll_updateIN8palo_udf6IntVal"],
    ["HLL_UPDATE_BIGINT", "hll_updateIN8palo_udf9BigIntVal"],
    ["HLL_UPDATE_FLOAT", "hll_updateIN8palo_udf8FloatVal"],
    ["HLL_UPDATE_DOUBLE", "hll_updateIN8palo_udf9DoubleVal"],
    ["HLL_UPDATE_STRING", "hll_updateIN8palo_udf9StringVal"],
    ["HLL_UPDATE_TIMESTAMP", "hll_updateIN8palo_udf11DateTimeVal"],
    ["HLL_UPDATE_DECIMAL", "hll_updateIN8palo_udf10DecimalVal"],
    ["HLL_MERGE", "hll_merge"],
    ["CODEGEN_ANYVAL_DATETIME_VAL_EQ", "datetime_val_eq"],
    ["CODEGEN_ANYVAL_STRING_VAL_EQ", "string_val_eq"],
    ["CODEGEN_ANYVAL_DECIMAL_VAL_EQ", "decimal_val_eq"],
    ["CODEGEN_ANYVAL_DATETIME_VALUE_EQ", "datetime_value_eq"],
    ["CODEGEN_ANYVAL_STRING_VALUE_EQ", "string_value_eq"],
    ["CODEGEN_ANYVAL_DECIMAL_VALUE_EQ", "decimal_value_eq"],
    ["RAW_VALUE_COMPARE", "8RawValue7compare"],
]
# Header boilerplate emitted before the IRFunction::Type enum body
# (license banner, include guard, opening of class/enum).
enums_preamble = '\
// Modifications copyright (C) 2017, Baidu.com, Inc.\n\
// Copyright 2017 The Apache Software Foundation\n\
//\n\
// Licensed under the Apache License, Version 2.0 (the "License");\n\
// you may not use this file except in compliance with the License.\n\
// You may obtain a copy of the License at\n\
//\n\
// http://www.apache.org/licenses/LICENSE-2.0\n\
//\n\
// Unless required by applicable law or agreed to in writing, software\n\
// distributed under the License is distributed on an "AS IS" BASIS,\n\
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\
// See the License for the specific language governing permissions and\n\
// limitations under the License.\n\
\n\
// This is a generated file, DO NOT EDIT IT.\n\
// To add new functions, see be/src/codegen/gen_ir_descriptions.py.\n\
\n\
#ifndef PALO_IR_FUNCTIONS_H\n\
#define PALO_IR_FUNCTIONS_H\n\
\n\
namespace palo {\n\
\n\
class IRFunction {\n\
public:\n\
enum Type {\n'

# Closes the enum, the class, the namespace and the include guard.
enums_epilogue = '\
};\n\
};\n\
\n\
}\n\
\n\
#endif\n'

# Header boilerplate emitted before the <name, enum> FN_MAPPINGS table.
names_preamble = '\
// Modifications copyright (C) 2017, Baidu.com, Inc.\n\
// Copyright 2017 The Apache Software Foundation\n\
//\n\
// Licensed under the Apache License, Version 2.0 (the "License");\n\
// you may not use this file except in compliance with the License.\n\
// You may obtain a copy of the License at\n\
//\n\
// http://www.apache.org/licenses/LICENSE-2.0\n\
//\n\
// Unless required by applicable law or agreed to in writing, software\n\
// distributed under the License is distributed on an "AS IS" BASIS,\n\
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\
// See the License for the specific language governing permissions and\n\
// limitations under the License.\n\
\n\
// This is a generated file, DO NOT EDIT IT.\n\
// To add new functions, see be/src/codegen/gen_ir_descriptions.py.\n\
\n\
#ifndef PALO_IR_FUNCTION_NAMES_H\n\
#define PALO_IR_FUNCTION_NAMES_H\n\
\n\
#include "palo_ir/palo_ir_functions.h"\n\
\n\
namespace palo {\n\
\n\
static struct {\n\
std::string fn_name; \n\
IRFunction::Type fn; \n\
} FN_MAPPINGS[] = {\n'

# Closes the FN_MAPPINGS table, the namespace and the include guard.
names_epilogue = '\
};\n\
\n\
}\n\
\n\
#endif\n'
# Output directory for the generated headers. Requires the PALO_HOME
# environment variable (raises KeyError if unset). NOTE: the directory is
# created as a side effect of merely importing this module.
BE_PATH = os.environ['PALO_HOME'] + "/gensrc/build/palo_ir/"
if not os.path.exists(BE_PATH):
    os.makedirs(BE_PATH)
if __name__ == "__main__":
print "Generating IR description files"
enums_file = open(BE_PATH + 'palo_ir_functions.h', 'w')
enums_file.write(enums_preamble)
names_file = open(BE_PATH + 'palo_ir_names.h', 'w')
names_file.write(names_preamble)
idx = 0
enums_file.write(" FN_START = " + str(idx) + ",\n")
for fn in ir_functions:
enum = fn[0]
fn_name = fn[1]
enums_file.write(" " + enum + " = " + str(idx) + ",\n")
names_file.write(" { \"" + fn_name + "\", IRFunction::" + enum + " },\n")
idx = idx + 1
enums_file.write(" FN_END = " + str(idx) + "\n")
enums_file.write(enums_epilogue)
enums_file.close()
names_file.write(names_epilogue)
names_file.close()
| {
"content_hash": "3f92189a12d2b1f4209772649e3c6f91",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 84,
"avg_line_length": 37.62011173184357,
"alnum_prop": 0.6645381645381645,
"repo_name": "lingbin/palo",
"id": "bae420095cf0cd5e51b0ea7d22b710e78d416680",
"size": "7621",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "be/src/codegen/gen_ir_descriptions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "434038"
},
{
"name": "C++",
"bytes": "9318216"
},
{
"name": "CMake",
"bytes": "59815"
},
{
"name": "CSS",
"bytes": "3843"
},
{
"name": "Java",
"bytes": "6152346"
},
{
"name": "JavaScript",
"bytes": "5625"
},
{
"name": "Lex",
"bytes": "28991"
},
{
"name": "Makefile",
"bytes": "9065"
},
{
"name": "Python",
"bytes": "124341"
},
{
"name": "Shell",
"bytes": "32156"
},
{
"name": "Thrift",
"bytes": "168087"
},
{
"name": "Yacc",
"bytes": "97015"
}
],
"symlink_target": ""
} |
"""Tests for the Atag config flow."""
from unittest.mock import PropertyMock, patch
from pyatag import errors
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.atag import DOMAIN
from homeassistant.core import HomeAssistant
from tests.components.atag import (
PAIR_REPLY,
RECEIVE_REPLY,
UID,
USER_INPUT,
init_integration,
)
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_show_form(hass: HomeAssistant) -> None:
    """Test that the form is served with no input."""
    # no data -> flow should stop at the user step and render the form
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_adding_second_device(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test that only one Atag configuration is allowed."""
    # first device is already set up by the integration fixture
    await init_integration(hass, aioclient_mock)

    duplicate = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}, data=USER_INPUT
    )
    assert duplicate["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert duplicate["reason"] == "already_configured"

    # a device reporting a different unique id must still be accepted
    id_patch = patch(
        "pyatag.AtagOne.id",
        new_callable=PropertyMock(return_value="secondary_device"),
    )
    with id_patch:
        second = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}, data=USER_INPUT
        )
        assert second["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_connection_error(hass: HomeAssistant) -> None:
    """Test we show user form on Atag connection error."""
    # authorize failing with the library base exception maps to cannot_connect
    with patch("pyatag.AtagOne.authorize", side_effect=errors.AtagException()):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_USER},
            data=USER_INPUT,
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "user"
        assert result["errors"] == {"base": "cannot_connect"}
async def test_unauthorized(hass: HomeAssistant) -> None:
    """Test we show correct form when Unauthorized error is raised."""
    # an Unauthorized error maps to the dedicated "unauthorized" form error
    with patch("pyatag.AtagOne.authorize", side_effect=errors.Unauthorized()):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_USER},
            data=USER_INPUT,
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "user"
        assert result["errors"] == {"base": "unauthorized"}
async def test_full_flow_implementation(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test registering an integration and finishing flow works."""
    # mock both device endpoints the flow will hit
    aioclient_mock.post("http://127.0.0.1:10000/pair", json=PAIR_REPLY)
    aioclient_mock.post("http://127.0.0.1:10000/retrieve", json=RECEIVE_REPLY)

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_USER},
        data=USER_INPUT,
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == UID
    entry = result["result"]
    assert entry.unique_id == UID
| {
"content_hash": "0f11f997d6a67174e9ed12889b0a8510",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 83,
"avg_line_length": 34.103092783505154,
"alnum_prop": 0.664752116082225,
"repo_name": "turbokongen/home-assistant",
"id": "81375792c711f4be23374fb34db14f58fb4ceaec",
"size": "3308",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/atag/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "30405146"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
"""This code example updates the destination URL of a single image creative.
To determine which image creatives exist, run get_all_creatives.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeService.updateCreatives
CreativeService.getCreativesByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
# Set this to the id of the image creative to update before running
# (see get_all_creatives.py to list creative ids).
IMAGE_CREATIVE_ID = 'INSERT_IMAGE_CREATIVE_ID_HERE'
def main(client, image_creative_id):
  """Updates the destination URL of the image creative with the given id.

  Args:
    client: an initialized dfp.DfpClient.
    image_creative_id: id of the ImageCreative to update.
  """
  # Initialize appropriate service.
  creative_service = client.GetService('CreativeService', version='v201408')

  # Create statement object to get all image creatives.
  values = [{
      'key': 'type',
      'value': {
          'xsi_type': 'TextValue',
          'value': 'ImageCreative'
      }
  }, {
      'key': 'id',
      'value': {
          'xsi_type': 'NumberValue',
          'value': image_creative_id
      }
  }]
  # bind both filter variables; limit of 1 since the id is unique
  query = 'WHERE creativeType = :type AND id = :id'
  statement = dfp.FilterStatement(query, values, 1)

  # Get creatives by statement.
  response = creative_service.getCreativesByStatement(
      statement.ToStatement())

  if 'results' in response:
    # Update each local creative object by changing its destination URL.
    updated_creatives = []
    for creative in response['results']:
      creative['destinationUrl'] = 'http://news.google.com'
      updated_creatives.append(creative)

    # Update creatives remotely.
    creatives = creative_service.updateCreatives(updated_creatives)

    # Display results.
    for creative in creatives:
      print ('Image creative with id \'%s\' and destination URL \'%s\' was '
             'updated.' % (creative['id'], creative['destinationUrl']))
  else:
    print 'No creatives found to update.'
if __name__ == '__main__':
  # Initialize client object. LoadFromStorage reads credentials from the
  # googleads.yaml file (see the module docstring).
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, IMAGE_CREATIVE_ID)
| {
"content_hash": "4eaef56cbe9a054eb0177967fd923be0",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 77,
"avg_line_length": 31.470588235294116,
"alnum_prop": 0.6822429906542056,
"repo_name": "cctaylor/googleads-python-lib",
"id": "66d4451905d7580fb3049bade1325cc23a7950ba",
"size": "2758",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201408/creative_service/update_creatives.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2774292"
}
],
"symlink_target": ""
} |
from typing import Callable, Dict, List, Tuple
import GPy
import numpy as np
import scipy as sp
from GPy.inference.latent_function_inference.posterior import Posterior
from GPy.util import choleskies
from GPy.util.linalg import dpotrs, dtrtrs, jitchol, pdinv, tdot
from GPy.util.univariate_Gaussian import cdfNormal, derivLogCdfNormal, logCdfNormal, std_norm_cdf, std_norm_pdf
from scipy.integrate import dblquad, quad
from scipy.linalg import block_diag, inv, sqrtm
from scipy.special import logsumexp
from scipy.stats import multivariate_normal, norm
from ..util import adam
# Helper functions (plain defs instead of lambda assignments, per PEP 8 E731).
def phi(x):
    """Standard normal CDF (wraps GPy's cdfNormal)."""
    return cdfNormal(x)


def sigmoid(x):
    """Logistic sigmoid 1 / (1 + exp(-x)); works elementwise on arrays."""
    return 1.0 / (1 + np.exp(-x))


def dlogphi_df(x):
    """Derivative of the log standard-normal CDF (wraps GPy's derivLogCdfNormal)."""
    return derivLogCdfNormal(x)


def dlogsigmoid_df(x):
    """First derivative of log sigmoid: d/dx log sigmoid(x) = sigmoid(-x)."""
    return sigmoid(-x)


def d2logsigmoid_df(x):
    """Second derivative of log sigmoid: -sigmoid(-x) * sigmoid(x)."""
    return -sigmoid(-x) * sigmoid(x)
def dL_fr(L: np.ndarray, dsigma: np.ndarray, alpha: np.ndarray, beta: np.ndarray, K: np.ndarray):
    """
    Partial derivative of a function with respect to beta via the Cholesky
    factor and the generalized matrix chain rule. The suffix _fr stands for
    "full rank"; use with the full-rank parametrization.

    :param L: Cholesky decomposition of Sigma: Sigma = L L^T
    :param dsigma: derivative of the function with respect to Sigma
    :param alpha: alpha parameter (unused here; kept for a uniform signature with dL_mf)
    :param beta: beta parameter (unused here; kept for a uniform signature with dL_mf)
    :param K: prior covariance matrix (unused here; kept for a uniform signature with dL_mf)
    :return: lower-triangular derivatives, shape (1, N, N) (flat-triangle friendly)
    """
    # (removed dead code: Sigma = L @ L.T was computed but never used)
    t2 = np.zeros_like(dsigma)
    for m in range(t2.shape[0]):
        for n in range(m + 1):
            # dSigma/dL[m, n] = e_m L[:, n]^T + (e_m L[:, n]^T)^T
            dl = np.zeros_like(dsigma)
            dl[m, :] = L[:, n].T
            dl += dl.T
            # chain rule: dF/dL[m, n] = tr(dF/dSigma^T * dSigma/dL[m, n])
            t2[m, n] = np.trace(dsigma.T @ dl)
    return t2[np.newaxis, :]
def dL_mf(L: np.ndarray, dsigma: np.ndarray, alpha: np.ndarray, beta: np.ndarray, K: np.ndarray):
    """
    Partial derivative of function with respect to beta using cholesky decomposition and the generalized matrix chain rule
    The ending _mf comes from "mean field"; this method should be used when we use the mean-field parametrization
    :param L: Cholesky decomposition of Sigma: Sigma= L L^T
    :param dsigma: derivative of the function with respect to Sigma
    :param alpha: alpha parameter
    :param beta: beta parameter, the vector the derivative is taken with respect to
    :param K: prior covariance matrix
    :return: The derivative of function with respect to beta
    """
    dL = np.zeros((L.shape[0], L.shape[1], beta.shape[0]))
    # NOTE(review): dL2 is allocated but never used — looks like dead code.
    dL2 = np.zeros((L.shape[0], L.shape[1], beta.shape[0]))
    res = np.zeros_like(beta)
    S = L @ L.T
    for k in range(beta.shape[0]):
        for i in range(L.shape[0]):
            for j in range(L.shape[1]):
                # dSigma[i, j] / dbeta[k] under the mean-field parametrization
                dL[i, j, k] = -2.0 * beta[k] * S[i, k] * S[j, k]
        # chain rule: fold dsigma against the fully-populated k-th slice
        # (placement reconstructed at the k-loop level — confirm against upstream)
        res[k] += np.trace(dsigma.T @ dL[:, :, k])
    return res
def dSigma_dLmn(L: np.ndarray, m: int, n: int):
    """
    Partial derivative of Sigma with respect to a single element of L, where
    Sigma = L L^T, approximated by a forward finite difference.

    :param L: Cholesky decomposition of Sigma
    :param m: row index in L
    :param n: column index in L
    :return: finite-difference approximation of dSigma / dL[m, n]
    """
    step = 1e-5
    perturbed = L.copy()
    perturbed[m, n] += step
    # forward difference of the map L -> L L^T
    return (perturbed @ perturbed.T - L @ L.T) / step
def comp_y_ij(mu: np.ndarray, Sigma: np.ndarray, i: int, j: int, epsilon: float):
    """
    A helper method to compute the mean and standard deviation of y_j - y_i when we
    know the joint distribution of f and y = f + noise, where noise has standard
    deviation epsilon
    :param mu: mean of latent function f
    :param Sigma: covariance of latent function f
    :param i: index i in y_j - y_i
    :param j: index j in y_j - y_i
    :param epsilon: noise standard deviation of y (callers also pass quadrature nodes here)
    :return: (shifted mean m_diff + sigma_diff * epsilon, standard deviation sigma_diff)
    """
    m_diff = mu[j] - mu[i]
    # NOTE(review): Var(y_j - y_i) is normally Sigma[j, j] + Sigma[i, i]
    # - 2 * Sigma[i, j]; the "+ 2 *" sign here looks suspicious — confirm intent
    # before changing, as downstream gradients depend on it.
    sigma_diff = np.sqrt(Sigma[i, i] + Sigma[j, j] + 2 * Sigma[i, j])
    return m_diff + sigma_diff * epsilon, sigma_diff
def variational_expectations_ove_full_rank(
    mf: np.ndarray, Sigma: np.ndarray, ind_winners: List[int], ind_loosers: List[int], sigma2s: float = 1.0
) -> Tuple[float, np.ndarray, np.ndarray]:
    """
    Computes the variational expectation and derivatives for the full rank approximation for a single batch
    :param mf: mean of the latent function approximation of the batch
    :param Sigma: Covariance of the latent function approximation of the batch
    :param ind_winners: List of batch winners in each pairwise comparisons. We assume that the feedback is given as batch winner form
    :param ind_loosers: List of batch loosers in each pairwise comparisons
    :param sigma2s: noise variance of the observations
    :return: expectation and its derivatives with respect to mean and covariance
    """
    N = mf.shape[0]
    dF_dm = np.zeros((N, 1))
    dF_dSigma = np.zeros((N, N))
    # Integration by quadrature (25-node Gauss-Hermite)
    gh_x, gh_w = np.polynomial.hermite.hermgauss(25)
    gh_w = gh_w / np.sqrt(np.pi)
    # to make sigmoid look more like probit
    sigma2s = sigma2s / 1.6
    F = 0
    # i is the winner (batch-winner feedback: one winner vs. all losers):
    i = ind_winners[0]
    for j in ind_loosers:
        # distribution of y_ij = y_i - y_j evaluated at the quadrature nodes
        y_ij, sigma_ij = comp_y_ij(mf, Sigma, i, j, gh_x)
        F += np.sum(np.log(sigmoid(y_ij / (np.sqrt(2) * sigma2s[i]))) * gh_w)
        ms_y_ij = sigmoid(-y_ij / (np.sqrt(2) * sigma2s[i]))
        # winner accumulates across losers (+=); each loser is set once (=)
        dF_dm[i, 0] += np.sum(-gh_w * ms_y_ij / (np.sqrt(2) * sigma2s[i]))
        dF_dm[j, 0] = np.sum(gh_w * ms_y_ij / (np.sqrt(2) * sigma2s[i]))
        dF_dSigma[j, j] = 0.5 * np.sum(gh_w * ms_y_ij / sigma_ij * gh_x / (np.sqrt(2) * sigma2s[i]))
        dF_dSigma[i, i] += dF_dSigma[j, j].copy()
        dF_dSigma[i, j] = 2.0 * dF_dSigma[j, j]
    return F, dF_dm, dF_dSigma
def df_d(
    y: List[Tuple[int, float]],
    yc: List[List[Tuple[int, int]]],
    m: np.ndarray,
    L: np.ndarray,
    L_inv: np.ndarray,
    K: np.ndarray,
    sigma2s: np.ndarray,
    alpha: np.ndarray,
    beta: np.ndarray,
    s_to_l: Callable = dL_fr,
):
    """
    Computes the log marginal likelihood and its derivatives with respect to alpha and beta. Works for both mean field and full rank approximations
    :param y: Direct observations in as a list of tuples telling location index (row in X) and observation value.
    :param yc: Batch comparisons in a list of lists of tuples. Each batch is a list and tuples tell the comparisons (winner index, loser index)
    :param m: mean of the latent values
    :param L: Cholesky decomposition of the latent value covariance
    :param L_inv: inverse of the cholesky decomposition (not used directly here)
    :param K: prior covariance
    :param sigma2s: noise variance of the observations
    :param alpha: Alpha vector used to parametrize the posterior approximation
    :param beta: Beta vector/matrix used to parametrize the posterior approximation
    :param s_to_l: A function to compute the derivative of log likelihood with respect to beta using the generalized chain rule and when we know the derivative of log likelihood with respect to Sigma
    :return: A tuple containing log marginal likelihood, its derivative with respect to alpha and its derivative with respect to beta

    NOTE: the batch order is randomized via np.random.choice, so the routine
    is not deterministic unless the global numpy seed is fixed (the summed
    result is order-independent, but downstream reproducibility may care).
    """
    Sigma = L @ L.T
    dF_dm_full = np.zeros_like(m)
    dF_dSigma_full = np.zeros_like(Sigma)
    F_full = 0
    # log_marginal = 0
    # visit each comparison batch exactly once, in a random order
    d_list = np.random.choice(range(len(yc)), size=len(yc), replace=False)
    for batch_idx in d_list:
        # global (row-in-X) indices of winners/losers within this batch
        loc_inds_winners, loc_inds_losers = [yc[batch_idx][k][0] for k in range(len(yc[batch_idx]))], [
            yc[batch_idx][k][1] for k in range(len(yc[batch_idx]))
        ]
        loc_inds_batch = np.sort(np.unique(loc_inds_winners + loc_inds_losers))
        # map global indices to positions inside the batch-local sub-matrices
        ind_winners, ind_losers = [np.where(loc_inds_batch == it)[0][0] for it in loc_inds_winners], [
            np.where(loc_inds_batch == it)[0][0] for it in loc_inds_losers
        ]
        # get variational moments for this batch
        F_batch, dF_dm_batch, dF_dSigma_batch = variational_expectations_ove_full_rank(
            m[loc_inds_batch],
            Sigma[np.ix_(loc_inds_batch, loc_inds_batch)],
            ind_winners,
            ind_losers,
            sigma2s[loc_inds_batch],
        )
        # scatter the batch-local contributions back into the full gradients
        dF_dm_full[loc_inds_batch] += dF_dm_batch
        dF_dSigma_full[np.ix_(loc_inds_batch, loc_inds_batch)] += dF_dSigma_batch
        F_full += F_batch
    # delta = 1e-5
    if len(y) > 0:
        # Gaussian likelihood contribution from the direct observations
        ys = np.zeros((len(y), 1))
        y_inds = np.zeros(len(y), dtype=int)
        # dir_list = np.random.choice(range(len(y)), size=len(y), replace=False)
        for ind in range(len(y)):
            (y_inds[ind], ys[ind, 0]) = y[ind]  # index in kernel, y value
        F_full += -0.5 * np.sum(
            ((m[y_inds] - ys) ** 2 + Sigma[y_inds, y_inds].reshape((-1, 1))) / sigma2s[y_inds].reshape((-1, 1))
        )
        dF_dm_full[y_inds] += (ys - m[y_inds]) / sigma2s[y_inds].reshape((-1, 1))
        dF_dSigma_full[y_inds, y_inds] += -0.5 / sigma2s[y_inds].reshape((-1))
    # chain rule back to the variational parameters
    alpha_grad = K @ dF_dm_full
    beta_grad = s_to_l(L, dF_dSigma_full, alpha, beta, K)
    log_marginal = F_full
    # full-rank path returns a triangular matrix -> flatten for the optimizer
    if beta_grad.shape[1] > 1:
        beta_grad = choleskies._triang_to_flat_pure(beta_grad)
    return log_marginal, alpha_grad, beta_grad
def recompute_posterior_fr(
    alpha: np.ndarray, beta: np.ndarray, K: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Recompute the posterior approximation (for the full rank approximation): mean K alpha,
    covariance L L^T where L is the triangular matrix packed in `beta`.

    :param alpha: Alpha vector used to parametrize the posterior approximation
    :param beta: Beta vector (flat triangle) used to parametrize the posterior approximation
    :param K: prior covariance
    :return: Tuple (m, L, L_inv, KL, dKL_db, dKL_da): the mean, the Cholesky of the
             covariance and its inverse, the KL divergence from the prior and the
             KL derivatives with respect to beta and alpha
    """
    N = K.shape[0]
    # unpack the flat parameter vector into a (1, N, N) triangular stack
    L = choleskies._flat_to_triang_pure(beta)
    assert L.shape[0] == 1
    L = L[0, :, :]
    # Compute mean: m = K alpha
    m = K @ alpha
    jitter = 1e-5  # stabilizes the prior inverse
    dKL_da = m.copy()
    Kinv = np.linalg.inv(K + np.eye(N) * jitter)
    L_inv = np.linalg.inv(L)
    # posterior covariance Sigma = L L^T
    Lamda_full_rank = np.dot(L, L.T)
    # dKL/dL via the generalized chain rule (dL_fr folds dKL/dSigma into L-space)
    dKL_db_triang = -dL_fr(L, 0.5 * (np.linalg.inv(Lamda_full_rank) - Kinv), None, None, None)
    Sigma = Lamda_full_rank
    # KL( N(m, Sigma) || N(0, K) )
    KL = 0.5 * (-N + (m.T @ Kinv @ m) + np.trace(Kinv @ Sigma) - np.log(np.linalg.det(Sigma @ Kinv)))
    dKL_db = choleskies._triang_to_flat_pure(dKL_db_triang)
    return m, L, L_inv, KL, dKL_db, dKL_da
def recompute_posterior_mf(
    alpha: np.ndarray, beta: np.ndarray, K: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Recompute the posterior approximation (for the mean field approximation) mean: K alpha, covariance inv(K + beta)
    :param alpha: Alpha vector used to parametrize the posterior approximation
    :param beta: Beta vector used to parametrize the posterior approximation (Lambda = diag(beta^2))
    :param K: prior covariance
    :return: Tuple containing the mean and cholesky of the covariance, its inverse and derivatives of the KL divergence with respect to beta and alpha
    """
    N = alpha.shape[0]
    # Lambda = diag(lam) = diag(beta.^2)
    lam_sqrt = beta.ravel()
    lam = beta.ravel() ** 2
    # Handle A = I + Lambda*K*Lambda
    KB = K @ np.diag(lam_sqrt)
    BKB = np.diag(lam_sqrt) @ KB
    A = np.eye(N) + BKB
    # pdinv returns (inverse, Cholesky, inverse-Cholesky, log-determinant)
    Ai, LA, Li, Alogdet = pdinv(A)
    # Compute Mean
    m = K @ alpha
    # Compute covariance matrix
    W = Li @ np.diag(1.0 / lam_sqrt)  # can be accelerated using broadcasting instead of matrix multiplication
    Sigma = (
        np.diag(1.0 / lam) - W.T @ W
    )  # computes np.diag(1./lam) - np.diag(1. / lam_sqrt) @ Ai @ np.diag(1. / lam_sqrt)
    # Compute KL
    KL = 0.5 * (Alogdet + np.trace(Ai) - N + np.sum(m * alpha))
    # Compute Gradients
    A_A2 = Ai - Ai.dot(Ai)
    dKL_db = np.diag(np.dot(KB.T, A_A2)).reshape(-1, 1)
    # dKL_da = K @ alpha
    dKL_da = m.copy()
    # jitchol adds jitter if Sigma is not numerically positive definite
    L = GPy.util.linalg.jitchol(Sigma)
    L_inv = np.linalg.inv(L)
    return m, L, L_inv, KL, dKL_db, dKL_da
def log_lik(
    x: np.ndarray, arg_list: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, Callable]
) -> Tuple[np.array, np.array]:
    """
    Computes the (negated) variational objective and its gradients for the alpha
    and beta values packed in x.
    :param x: Concatenated and flattened alpha and beta
    :param arg_list: Fixed arguments of the optimization: prior covariance,
        observation noise, direct observations, batch comparisons, the
        posterior-recomputation function and the Sigma-to-L chain-rule helper
    :return: Tuple of negative log marginal and its gradient w.r.t. [alpha; beta]
    """
    K, sigma2s, y, yc, recompute_posterior, s_to_l = arg_list[:6]
    num_points = K.shape[0]
    # split the flat optimization vector back into alpha and beta
    alpha = x[:num_points].reshape(-1, 1)
    beta = x[num_points:].reshape(-1, 1)
    # broadcast a scalar noise level to one value per location
    if not isinstance(sigma2s, np.ndarray):
        sigma2s = sigma2s * np.ones((num_points, 1))
    m, L, L_inv, KL, dKL_db, dKL_da = recompute_posterior(alpha, beta, K)
    log_marginal, alpha_grad, beta_grad = df_d(y, yc, m, L, L_inv, K, sigma2s, alpha, beta, s_to_l=s_to_l)
    # ELBO = expected log likelihood - KL; gradients follow the same pattern
    log_marginal -= KL.sum()
    alpha_grad -= dKL_da
    beta_grad -= dKL_db
    # minimizers expect the negated objective and a flat gradient vector
    return -log_marginal, -np.r_[alpha_grad, beta_grad].reshape(-1)
def vi_comparison(
    X: np.ndarray,
    y: List[Tuple[int, float]],
    yc: List[List[Tuple[int, int]]],
    kern: GPy.kern.Kern,
    sigma2s: np.ndarray,
    alpha: np.ndarray,
    beta: np.ndarray,
    max_iters: int = 200,
    lr: float = 1e-3,
    method: str = "fr",
    optimize: str = "adam",
    get_logger: Callable = None,
) -> Tuple[Posterior, float, Dict, np.ndarray, np.ndarray]:
    """
    :param X: All locations of both direct observations and batch comparisons
    :param y: Direct observations in as a list of tuples telling location index (row in X) and observation value.
    :param yc: Batch comparisons in a list of lists of tuples. Each batch is a list and tuples tell the comparisons (winner index, loser index)
    :param kern: Prior covariance kernel
    :param sigma2s: Noise variance of the observations
    :param alpha: Initial values for alpha
    :param beta: Initial values for beta
    :param max_iters: maximum number of optimization iterations
    :param lr: learning rate (only relevant for the adam optimizer)
    :param method: full rank 'fr' or mean field 'mf' methods
    :param optimize: optimization algorithm. adam or l-bfgs-B
    :param get_logger: Function for receiving the logger where the prints are forwarded.
    :return: A Tuple containing the posterior, log marginal likelihood, its gradients with respect to hyper parameters (not supported at the moment) and alpha and beta values
    """
    # pick the parametrization-specific helpers; anything but 'fr' means mean field
    if method == "fr":
        recompute_posterior = recompute_posterior_fr
        s_to_l = dL_fr
    else:
        recompute_posterior = recompute_posterior_mf
        s_to_l = dL_mf
    # prior covariance with jitter for numerical stability
    K = kern.K(X)
    K = K + 1e-6 * np.identity(len(K))
    N = X.shape[0]
    # optimizers work on one flat vector [alpha; beta]
    X0 = np.r_[alpha, beta]
    args = [K, sigma2s, y, yc, recompute_posterior, s_to_l]
    if optimize == "adam":
        X, log_marginal, _ = adam(log_lik, X0.flatten(), args, bounds=None, max_it=max_iters, get_logger=get_logger)
    else:
        res = sp.optimize.minimize(fun=log_lik, x0=X0.flatten(), args=args, method="L-BFGS-B", jac=True, bounds=None)
        X = res.x.reshape(-1)
        log_marginal = res.fun
    # unpack the optimized flat vector back into alpha and beta
    alpha = X[: K.shape[0]].reshape(-1, 1)
    beta = X[K.shape[0] :].reshape(-1, 1)
    # Create posterior instance from the optimized variational parameters
    m, L, L_inv, KL, dKL_db_, dKL_da_ = recompute_posterior(alpha, beta, K)
    posterior = Posterior(mean=m, cov=L @ L.T, K=K)
    # hyper-parameter gradients are not supported at the moment
    grad_dict = {}  # {'dL_dK': dF_dK - dKL_dK, 'dL_dthetaL':dL_dthetaL}
    # return posterior, log_marginal, grad_dict
    return posterior, log_marginal, grad_dict, alpha, beta
| {
"content_hash": "cee0cc0c789d36ac9cb0e0bcd894da8e",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 199,
"avg_line_length": 40.36907730673317,
"alnum_prop": 0.6402891030392883,
"repo_name": "EmuKit/emukit",
"id": "e5819e6d09731374cf0e5606a90b2fa5c1ae69ec",
"size": "16188",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "emukit/examples/preferential_batch_bayesian_optimization/pbbo/inferences/vi_batch_comparison.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "972291"
},
{
"name": "Stan",
"bytes": "1413"
}
],
"symlink_target": ""
} |
import pyeq2
import numpy, scipy.interpolate, scipy.stats
numpy.seterr(over = 'raise', divide = 'raise', invalid = 'raise', under = 'ignore') # numpy raises warnings, convert to exceptions to trap them
class IModel(object):
    """Base class for pyeq2 curve/surface fitting models.

    Subclasses set `_dimensionality` (2 for curves, 3 for surfaces) and
    override the capability flags below. The class is configured with a
    fitting target drawn from `fittingTargetDictionary`.
    """

    # capability flags, overridden by specific model subclasses
    splineFlag = False
    userSelectablePolynomialFlag = False
    userSelectablePolyfunctionalFlag = False
    userSelectableRationalFlag = False
    userDefinedFunctionFlag = False

    # "e" is removed so it is not mistaken for Euler's constant "e"
    # "l" is removed so it is not mistaken for the number "1" - some fonts make these appear the same or very similar
    # "o" is removed so it is not mistaken for the number "0" - some fonts make these appear the same or very similar
    # VBA is case insensitive, so coefficient 'a' looks the same to VBA as coefficient 'A' - use double characters instead of capital letters
    # "x", "y", "xx", and "yy" are removed so they are not mistaken for variables named x or y
    listOfAdditionalCoefficientDesignators = ['a','b','c','d','f','g','h','i','j','k','m','n','p','q','r','s','t','u','v','w','z','aa','bb','cc','dd','ff','gg','hh','ii','jj','kk','mm','nn','pp','qq','rr','ss','tt','uu','vv','ww','zz']

    # maps the short fitting-target code (used by __init__) to a human-readable name
    fittingTargetDictionary = {'SSQABS': 'sum of squared absolute error',
                               'SSQREL': 'sum of squared relative error',
                               'ODR': 'sum of squared orthogonal distance',
                               'ABSABS': 'sum of absolute value of absolute error',
                               'ABSREL': 'sum of absolute value of relative error',
                               'PEAKABS':'peak absolute value of absolute error',
                               'PEAKREL':'peak absolute value of relative error',
                               'AIC': 'Akaike Information Criterion',
                               'BIC': 'Bayesian Information Criterion'
                               }
def __init__(self, inFittingTarget = 'SSQABS', inExtendedVersionName = 'Default'):
if inExtendedVersionName == '':
inExtendedVersionName = 'Default'
if inFittingTarget not in self.fittingTargetDictionary.keys():
raise Exception, str(inFittingTarget) + ' is not in the IModel class fitting target dictionary.'
self.extendedVersionHandler = eval('pyeq2.ExtendedVersionHandlers.ExtendedVersionHandler_' + inExtendedVersionName.replace(' ', '') + '.ExtendedVersionHandler_' + inExtendedVersionName.replace(' ', '') + '()')
self.dataCache = pyeq2.dataCache()
self.upperCoefficientBounds = []
self.lowerCoefficientBounds = []
self.estimatedCoefficients = []
self.fixedCoefficients = []
self.solvedCoefficients = []
self.polyfunctional2DFlags = []
self.polyfunctional3DFlags = []
self.xPolynomialOrder = None
self.yPolynomialOrder = None
self.rationalNumeratorFlags = []
self.rationalDenominatorFlags = []
self.fittingTarget = inFittingTarget
self.independentData1CannotContainZeroFlag = False
self.independentData1CannotContainPositiveFlag = False
self.independentData1CannotContainNegativeFlag = False
self.independentData2CannotContainZeroFlag = False
self.independentData2CannotContainPositiveFlag = False
self.independentData2CannotContainNegativeFlag = False
try:
if self._dimensionality == 2:
self.exampleData = '''
X Y
5.357 0.376
5.457 0.489
5.797 0.874
5.936 1.049
6.161 1.327
6.697 2.054
6.731 2.077
6.775 2.138
8.442 4.744
9.769 7.068
9.861 7.104
'''
else:
self.exampleData = '''
X Y Z
3.017 2.175 0.320
2.822 2.624 0.629
2.632 2.839 0.950
2.287 3.030 1.574
2.207 3.057 1.725
2.048 3.098 2.035
1.963 3.115 2.204
1.784 3.144 2.570
1.712 3.153 2.721
2.972 2.106 0.313
2.719 2.542 0.643
2.495 2.721 0.956
2.070 2.878 1.597
1.969 2.899 1.758
1.768 2.929 2.088
1.677 2.939 2.240
1.479 2.957 2.583
1.387 2.963 2.744
2.843 1.984 0.315
2.485 2.320 0.639
2.163 2.444 0.954
1.687 2.525 1.459
1.408 2.547 1.775
1.279 2.554 1.927
1.016 2.564 2.243
0.742 2.568 2.581
0.607 2.571 2.753
'''
except:
pass
    def CalculateCoefficientAndFitStatistics(self):
        """Populate fit-statistic attributes from the solved coefficients.

        Sets: nobs, ncoef, df_e, df_r, r2, rmse, r2adj, Fstat, Fpv, ll,
        aic, bic, cov_beta, sd_beta, tstat_beta, pstat_beta, ci.  Each
        statistic is computed best-effort: any numerical failure leaves the
        corresponding attribute as None.
        """
        # ensure integers are promoted to floating point with "1.0 * var"
        self.nobs = 1.0 * len(self.dataCache.allDataCacheDictionary['DependentData'])  # number of observations
        self.ncoef = 1.0 * len(self.solvedCoefficients)  # number of coef.
        self.df_e = self.nobs - self.ncoef  # degrees of freedom, error
        self.df_r = self.ncoef - 1.0  # degrees of freedom, regression
        try:
            self.r2 = 1.0 - self.modelAbsoluteError.var()/self.dataCache.allDataCacheDictionary['DependentData'].var()
        except:
            self.r2 = None
        try:
            self.rmse = numpy.sqrt(numpy.sum(self.modelAbsoluteError * self.modelAbsoluteError) / self.nobs)
        except:
            self.rmse = None
        try:
            self.r2adj = 1.0 - (1.0 - self.r2)*((self.nobs - 1.0)/(self.nobs-self.ncoef))  # adjusted R-square
        except:
            self.r2adj = None
        try:
            self.Fstat = (self.r2/self.df_r) / ((1.0 - self.r2)/self.df_e)  # model F-statistic
        except:
            self.Fstat = None
        try:
            self.Fpv = 1.0 - scipy.stats.f.cdf(self.Fstat, self.df_r, self.df_e)  # F-statistic p-value
        except:
            self.Fpv = None
        # Model log-likelihood, AIC, and BIC criterion values
        try:
            self.ll = -(self.nobs*0.5)*(1.0 + numpy.log(2.0*numpy.pi)) - (self.nobs*0.5)*numpy.log(numpy.dot(self.modelAbsoluteError,self.modelAbsoluteError)/self.nobs)
        except:
            self.ll = None
        try:
            self.aic = -2.0*self.ll/self.nobs + (2.0*self.ncoef/self.nobs)
        except:
            self.aic = None
        try:
            self.bic = -2.0*self.ll/self.nobs + (self.ncoef*numpy.log(self.nobs))/self.nobs
        except:
            self.bic = None
        if self.splineFlag == True:  # not appicable to splines. This might be better done with
            # per-coefficient statistics have no meaning for splines
            self.cov_beta = None
            self.sd_beta = None
            self.tstat_beta = None
            self.pstat_beta = None
            self.ci = None
            return
        else:
            # see both scipy.odr.odrpack and http://www.scipy.org/Cookbook/OLS
            # this is inefficient but works for every possible case
            # NOTE(review): scipy.odr is used here but this module only imports
            # scipy.interpolate and scipy.stats; presumably scipy.odr is imported
            # elsewhere in the pyeq2 package -- verify before reuse
            model = scipy.odr.odrpack.Model(self.WrapperForODR)
            self.dataCache.FindOrCreateAllDataCache(self)
            data = scipy.odr.odrpack.Data(self.dataCache.allDataCacheDictionary['IndependentData'], self.dataCache.allDataCacheDictionary['DependentData'])
            # maxit=0, fit_type=2: run a single OLS pass just to obtain the
            # parameter statistics at the already-solved coefficients
            myodr = scipy.odr.odrpack.ODR(data, model, beta0=self.solvedCoefficients, maxit=0)
            myodr.set_job(fit_type=2)
            parameterStatistics = myodr.run()
            self.cov_beta = parameterStatistics.cov_beta  # parameter covariance matrix
            try:
                # NOTE(review): this stores the *squared* standard deviations
                # (i.e. variances) under the name sd_beta -- confirm intended
                self.sd_beta = parameterStatistics.sd_beta * parameterStatistics.sd_beta
            except:
                self.sd_beta = None
            self.ci = []
            # two-sided 95% confidence interval half-width factor
            t_df = scipy.stats.t.ppf(0.975, self.df_e)
            for i in range(len(self.solvedCoefficients)):
                self.ci.append([self.solvedCoefficients[i] - t_df * parameterStatistics.sd_beta[i], self.solvedCoefficients[i] + t_df * parameterStatistics.sd_beta[i]])
            try:
                self.tstat_beta = self.solvedCoefficients / parameterStatistics.sd_beta  # coeff t-statistics
            except:
                self.tstat_beta = None
            try:
                self.pstat_beta = (1.0 - scipy.stats.t.cdf(numpy.abs(self.tstat_beta), self.df_e)) * 2.0  # coef. p-values
            except:
                self.pstat_beta = None
    def CalculateModelErrors(self, inCoeffs, inDictionary):
        """Evaluate the model and store absolute/relative/percent errors.

        Sets modelPredictions, modelAbsoluteError and, when the dependent
        data contains no zeros, modelRelativeError and modelPercentError.
        NOTE: inCoeffs is mutated in place when fixed coefficients are set.
        """
        if self.fixedCoefficients != []:
            # fixed coefficients rule out the linear solver shortcut
            self._canLinearSolverBeUsedForSSQABS = False
            for i in range(len(inCoeffs)):
                if self.fixedCoefficients[i]: # use None as a flag for coefficients that are not fixed
                    inCoeffs[i] = self.fixedCoefficients[i]
        self.modelPredictions = self.CalculateModelPredictions(inCoeffs, inDictionary)
        self.modelAbsoluteError = self.modelPredictions - inDictionary['DependentData']
        try:
            if self.dataCache.DependentDataContainsZeroFlag == False:
                self.modelRelativeError = self.modelAbsoluteError / inDictionary['DependentData']
                self.modelPercentError = self.modelRelativeError * 100.0
        except:
            # a division error means a zero slipped through the flag check
            self.dataCache.DependentDataContainsZeroFlag = True # this is effectively true if this code is reached
            self.modelRelativeError = []
            self.modelPercentError = []
def CalculateReducedDataFittingTarget(self, inCoeffs):
#save time by checking constraints and bounds first
if not self.AreCoefficientsWithinBounds(inCoeffs):
return 1.0E300
# return SSQ as we are only using this method for guessing initial coefficients
try:
try: # set any fixed coefficients
if self.fixedCoefficients != []:
self._canLinearSolverBeUsedForSSQABS = False
for i in range(len(inCoeffs)):
if self.fixedCoefficients[i]: # use None as a flag for coefficients that are not fixed
inCoeffs[i] = self.fixedCoefficients[i]
except:
pass
error = self.CalculateModelPredictions(inCoeffs, self.dataCache.reducedDataCacheDictionary) - self.dataCache.reducedDataCacheDictionary['DependentData']
ssq = numpy.sum(numpy.square(error))
except:
return 1.0E300
if numpy.isfinite(ssq):
return ssq
else:
return 1.0E300
    def CalculateAllDataFittingTarget(self, inCoeffs):
        """Return the configured fitting-target metric over the full data set.

        Dispatches on self.fittingTarget (see fittingTargetDictionary) and
        returns 1.0E300 for out-of-bounds coefficients, numerical failures,
        or non-finite results.  NOTE: inCoeffs may be mutated in place when
        fixed coefficients are set.
        """
        #save time by checking bounds first
        if not self.AreCoefficientsWithinBounds(inCoeffs):
            return 1.0E300
        try:
            try: # set any fixed coefficients
                if self.fixedCoefficients != []:
                    self._canLinearSolverBeUsedForSSQABS = False
                    for i in range(len(inCoeffs)):
                        if self.fixedCoefficients[i]: # use None as a flag for coefficients that are not fixed
                            inCoeffs[i] = self.fixedCoefficients[i]
            except:
                pass
            self.CalculateModelErrors(inCoeffs, self.dataCache.allDataCacheDictionary)
            error = self.modelAbsoluteError
            # apply observation weights when supplied
            if len(self.dataCache.allDataCacheDictionary['Weights']):
                error = error * self.dataCache.allDataCacheDictionary['Weights']
            if self.fittingTarget == "SSQABS":
                val = numpy.sum(numpy.square(error))
                if numpy.isfinite(val):
                    return val
                else:
                    return 1.0E300
            if self.fittingTarget == "SSQREL":
                error = error / self.dataCache.allDataCacheDictionary['DependentData']
                val = numpy.sum(numpy.square(error))
                if numpy.isfinite(val):
                    return val
                else:
                    return 1.0E300
            if self.fittingTarget == "ABSABS":
                val = numpy.sum(numpy.abs(error))
                if numpy.isfinite(val):
                    return val
                else:
                    return 1.0E300
            if self.fittingTarget == "ABSREL":
                val = numpy.sum(numpy.abs(error / self.dataCache.allDataCacheDictionary['DependentData']))
                if numpy.isfinite(val):
                    return val
                else:
                    return 1.0E300
            if self.fittingTarget == "PEAKABS":
                val = numpy.max(numpy.abs(error))
                if numpy.isfinite(val):
                    return val
                else:
                    return 1.0E300
            if self.fittingTarget == "PEAKREL":
                val = numpy.max(numpy.abs(error / self.dataCache.allDataCacheDictionary['DependentData']))
                if numpy.isfinite(val):
                    return val
                else:
                    return 1.0E300
            if self.fittingTarget == "ODR": # this is inefficient but works for every possible case
                model = scipy.odr.odrpack.Model(self.WrapperForODR)
                if self.dataCache.allDataCacheDictionary['Weights']:
                    data = scipy.odr.odrpack.Data(self.dataCache.allDataCacheDictionary['IndependentData'], self.dataCache.allDataCacheDictionary['DependentData'], we = self.dataCache.allDataCacheDictionary['Weights'])
                else:
                    data = scipy.odr.odrpack.Data(self.dataCache.allDataCacheDictionary['IndependentData'], self.dataCache.allDataCacheDictionary['DependentData'])
                # maxit=0, fit_type=2: a single OLS evaluation at inCoeffs
                myodr = scipy.odr.odrpack.ODR(data, model, beta0=inCoeffs, maxit=0)
                myodr.set_job(fit_type=2)
                out = myodr.run()
                val = out.sum_square
                if numpy.isfinite(val):
                    return val
                else:
                    return 1.0E300
            # remaining targets require these
            ncoef = 1.0 * len(inCoeffs)
            nobs = 1.0 * len(self.dataCache.allDataCacheDictionary['DependentData'])
            # Gaussian log-likelihood of the residuals
            ll = -(nobs*0.5)*(1.0 + numpy.log(2.0*numpy.pi)) - (nobs*0.5)*numpy.log(numpy.dot(error,error)/nobs)
            if self.fittingTarget == "AIC":
                val = -2.0*ll/nobs + (2.0*ncoef/nobs)
                if numpy.isfinite(val):
                    return val
                else:
                    return 1.0E300
            if self.fittingTarget == "BIC":
                val = -2.0*ll/nobs + (ncoef*numpy.log(nobs))/nobs
                if numpy.isfinite(val):
                    return val
                else:
                    return 1.0E300
        except:
            return 1.0E300
    def Solve(self):
        """Fit the model, choosing the solver chain appropriate for the
        model type and the configured fitting target, and return the
        solved coefficients."""
        solver = pyeq2.solverService()
        if self.splineFlag:
            return solver.SolveUsingSpline(self)
        # fixed coefficients rule out the linear solver shortcut
        if self.fixedCoefficients != []:
            self._canLinearSolverBeUsedForSSQABS = False
        if self.fittingTarget == 'SSQABS':
            if self.CanLinearSolverBeUsedForSSQABS() == True:
                return solver.SolveUsingLinear(self)
            else:
                # differential evolution seeds Levenberg-Marquardt, whose
                # result is polished by a final simplex pass
                self.estimatedCoefficients = solver.SolveUsingDE(self)
                self.estimatedCoefficients = solver.SolveUsingLevenbergMarquardt(self)
                return solver.SolveUsingSimplex(self)
        if self.fittingTarget == 'ODR':
            self.estimatedCoefficients = solver.SolveUsingDE(self)
            return solver.SolveUsingODR(self)
        # default
        self.estimatedCoefficients = solver.SolveUsingDE(self)
        return solver.SolveUsingSimplex(self)
def AreCoefficientsWithinBounds(self, inCoeffs):
for index in range(len(self.upperCoefficientBounds)):
if inCoefficient[index] > self.upperCoefficientBounds[index]:
return False
for index in range(len(self.lowerCoefficientBounds)):
if inCoeffs[index] < self.lowerCoefficientBounds[index]:
return False
return True
    def GetDisplayName(self):
        # human-readable model name, assembled by the version handler
        return self.extendedVersionHandler.AssembleDisplayName(self)

    def GetDisplayHTML(self):
        # HTML representation of the model, assembled by the version handler
        return self.extendedVersionHandler.AssembleDisplayHTML(self)

    def GetDimensionality(self):
        # 2 for y=f(x) models, 3 for z=f(x,y) models (set by subclasses)
        return self._dimensionality

    def CanLinearSolverBeUsedForSSQABS(self):
        # the handler may veto the linear solver even when the model allows it
        return self.extendedVersionHandler.CanLinearSolverBeUsedForSSQABS(self._canLinearSolverBeUsedForSSQABS)
def WrapperForScipyCurveFit(self, data, *inCoeffs):
if self.fixedCoefficients != []:
self._canLinearSolverBeUsedForSSQABS = False
for i in range(len(inCoeffs)):
if self.fixedCoefficients[i]: # use None as a flag for coefficients that are not fixed
inCoeffs[i] = self.fixedCoefficients[i]
return self.CalculateModelPredictions(inCoeffs, self.dataCache.allDataCacheDictionary)
    def WrapperForODR(self, inCoeffs, data):
        """Adapter matching scipy.odr's model-function signature
        (coefficients first, then data); returns model predictions.

        When ODR probes points other than the cached independent data, the
        cache is temporarily rebuilt for those points and restored afterwards.
        """
        if numpy.array_equal(data, self.dataCache.allDataCacheDictionary['IndependentData']):
            # fast path: ODR is evaluating at the cached data points
            if self.fixedCoefficients != []:
                self._canLinearSolverBeUsedForSSQABS = False
                for i in range(len(inCoeffs)):
                    if self.fixedCoefficients[i]: # use None as a flag for coefficients that are not fixed
                        inCoeffs[i] = self.fixedCoefficients[i]
            result = self.CalculateModelPredictions(inCoeffs, self.dataCache.allDataCacheDictionary)
        else:
            # slow path: swap in a temporary cache built for ODR's probe
            # points, evaluate, then restore the original cache
            tempCache = self.dataCache.allDataCacheDictionary
            self.dataCache.allDataCacheDictionary = {}
            self.dataCache.allDataCacheDictionary['IndependentData'] = data
            self.dataCache.FindOrCreateAllDataCache(self)
            if self.fixedCoefficients != []:
                self._canLinearSolverBeUsedForSSQABS = False
                for i in range(len(inCoeffs)):
                    if self.fixedCoefficients[i]: # use None as a flag for coefficients that are not fixed
                        inCoeffs[i] = self.fixedCoefficients[i]
            result = self.CalculateModelPredictions(inCoeffs, self.dataCache.allDataCacheDictionary)
            self.dataCache.allDataCacheDictionary = tempCache
        return result
    def GetCoefficientDesignators(self):
        # coefficient names (a, b, c, ...) assembled by the version handler
        return self.extendedVersionHandler.AssembleCoefficientDesignators(self)

    def ShouldDataBeRejected(self, unused):
        # the 'unused' parameter is kept for interface compatibility with callers
        return self.extendedVersionHandler.ShouldDataBeRejected(self)
| {
"content_hash": "55a7f103f841b126d3ac20d492ab9113",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 235,
"avg_line_length": 42.60958904109589,
"alnum_prop": 0.5803461394202433,
"repo_name": "JMoravec/unkRadnet",
"id": "f8a27f7becebe809cba5521271c570188b6e0621",
"size": "19041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fitToCurve/pyeq2/IModel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6107"
},
{
"name": "Logos",
"bytes": "132148"
},
{
"name": "M",
"bytes": "832584"
},
{
"name": "Matlab",
"bytes": "401"
},
{
"name": "Python",
"bytes": "2747757"
},
{
"name": "Shell",
"bytes": "418"
}
],
"symlink_target": ""
} |
import sh
import click
import sys
from os import path, getcwd, makedirs
from .device import device_list
from .utils import cd, logger
from .config import get_config
from .prepare import prepare_file, prepare_lib
# UIAutomation Instruments Template location. Accurate as at Xcode 6.3
INSTRUMENTS_AUTOMATION_TEMPLATE_PATH = "/Applications/Xcode.app/Contents/Applications/Instruments.app/Contents/PlugIns/AutomationInstrument.xrplugin/Contents/Resources/Automation.tracetemplate"


@click.group()
def fixxd():
    # top-level click command group; subcommands are attached via add_command()
    pass
def test(test_file, device=None, verbose=False, debug=False):
    """
    Launch UI Automation tests from `test_file` on device `device` using instruments.

    test_file: path (relative to the configured tests_dir) of the test script.
    device: target device identifier; when None, the first attached device
        found by device_list() is used.
    verbose/debug: raise the log level to INFO/DEBUG respectively.

    Raises Exception when run outside a fixxd folder or when no device is
    plugged in.
    """
    logger.debug("Finding folder from {0}".format(getcwd()))
    if verbose is True:
        logger.setLevel("INFO")
    if debug is True:
        logger.setLevel("DEBUG")
    cfg = get_config(getcwd())
    if not cfg:
        raise Exception("You must be in a fixxd folder")
    if not device:
        # fall back to the first attached device
        devices = device_list()
        if len(devices) == 0:
            raise Exception("Please plug a device")
        device = devices[0]
    # ensure the output directories exist
    results_dir = cfg["results_dir"]
    if not path.exists(results_dir):
        makedirs(results_dir)
    build_dir = cfg["build_dir"]
    if not path.exists(build_dir):
        makedirs(build_dir)
    # TODO: Detect device from CLI, test_file is meant to become a name only (test_name)
    device_dir_name = path.dirname(test_file)
    build_dir_with_device = path.join(build_dir, device_dir_name)
    if not path.exists(build_dir_with_device):
        makedirs(build_dir_with_device)
    # preprocess the test script and its library into the build dir
    abs_test_path = path.abspath(path.join(cfg["tests_dir"], test_file))
    test_path = prepare_file(abs_test_path, build_dir_with_device)
    prepare_lib(cfg["lib_dir"], path.join(build_dir, "lib/"))
    # run instruments from the results dir so its trace output lands there
    with cd(results_dir):
        sh.instruments("-w", device,
                       "-t", INSTRUMENTS_AUTOMATION_TEMPLATE_PATH,
                       cfg["app_name"],
                       "-e", "UIASCRIPT", test_path,
                       "-e", "UIARESULTSPATH", results_dir,
                       _out=sys.stdout, _err=sys.stderr)
@click.command()
@click.argument("test-file")
@click.argument("device", default=None, required=False)
@click.option("--verbose", is_flag=True)
@click.option("--debug", is_flag=True)
def cli_test(*args, **kwargs):
    # thin CLI wrapper: all real work happens in test()
    test(*args, **kwargs)


# register under the subcommand name "test" (distinct from the function name)
fixxd.add_command(cli_test, "test")
| {
"content_hash": "eba1cca4b743b0ff0d58cfb93b4dd840",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 193,
"avg_line_length": 31.28205128205128,
"alnum_prop": 0.6491803278688525,
"repo_name": "Stupeflix/fixxd",
"id": "bbc37209ad12739dc06fc79797f1684be4a9634f",
"size": "2441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixxd/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12420"
}
],
"symlink_target": ""
} |
"""
Pegasus utility for checking file integrity after transfers
Usage: pegasus-integrity-check [options]
"""
from __future__ import print_function
import glob
import json
import logging
import optparse
import os
import pprint
import subprocess
import sys
import threading
import time
from datetime import datetime

from Pegasus.tools import worker_utils as utils
##
# Copyright 2007-2017 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# for some reason, sometimes grp is missing, but we can do without it
try:
import grp
import pwd
except Exception:
pass
# see https://www.python.org/dev/peps/pep-0469/
# Compatibility shim: provide iteritems()/itervalues() helpers that work on
# both Python 2 and Python 3 dictionaries.
try:
    dict.iteritems
except AttributeError:
    # Python 3
    def itervalues(d):
        """Return an iterator over the values of *d*."""
        return iter(d.values())

    def iteritems(d):
        """Return an iterator over the (key, value) pairs of *d*."""
        return iter(d.items())
else:
    # Python 2
    def itervalues(d):
        """Return an iterator over the values of *d*."""
        return d.itervalues()

    def iteritems(d):
        """Return an iterator over the (key, value) pairs of *d*."""
        return d.iteritems()
__author__ = "Mats Rynge <rynge@isi.edu>"
# --- classes -----------------------------------------------------------------
class Singleton(type):
    """Metaclass implementation of the singleton pattern.

    A class using this metaclass is instantiated at most once; subsequent
    calls return the cached instance.  A threading.Lock is attached to the
    class as `lock` when the instance is first created.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
            # BUG FIX: 'threading' was referenced here without ever being
            # imported (NameError on first instantiation); the module import
            # block now imports threading
            cls.lock = threading.Lock()
        return cls._instances[cls]
# --- global variables ----------------------------------------------------------------
prog_dir = os.path.realpath(os.path.join(os.path.dirname(sys.argv[0])))  # directory containing this script
prog_base = os.path.split(sys.argv[0])[1]  # Name of this program
logger = logging.getLogger("PegasusIntegrity")  # module-wide logger, configured by setup_logger()
multipart_fh = None  # lazily-opened handle for $PEGASUS_MULTIPART_DIR output; -1 marks an earlier open failure
count_succeeded = 0  # number of files whose checksum verified OK
count_failed = 0  # number of files whose checksum mismatched
total_timing = 0.0  # cumulative seconds spent checksumming during --verify
# --- functions ----------------------------------------------------------------
def setup_logger(debug_flag):
    """Configure the module-level logger to write to the console.

    debug_flag: when true, both the logger and the console handler are set
    to DEBUG instead of the INFO default.
    """
    # log to the console
    console = logging.StreamHandler()
    # default log level - make logger/console match
    logger.setLevel(logging.INFO)
    console.setLevel(logging.INFO)
    # debug - from command line
    if debug_flag:
        logger.setLevel(logging.DEBUG)
        console.setLevel(logging.DEBUG)
    # formatter
    formatter = logging.Formatter("Integrity check: %(message)s")
    console.setFormatter(formatter)
    logger.addHandler(console)
    logger.debug("Logger has been configured")
def backticks(cmd_line):
    """
    what would a python program be without some perl love?

    Run *cmd_line* through the shell and return its stdout decoded as UTF-8.
    NOTE: shell=True means cmd_line must be trusted (callers pass pipelines).
    """
    # BUG FIX: this function used 'subprocess' without the module ever
    # importing it; the module import block now imports subprocess
    return (
        subprocess.Popen(cmd_line, shell=True, stdout=subprocess.PIPE)
        .communicate()[0]
        .decode("utf-8")
    )
def json_object_decoder(obj):
    """
    utility function used by json.load() to parse some known objects into equivalent Python objects

    Recognizes objects with "type" of "transfer", "mkdir" or "remove";
    anything else is returned unchanged.

    NOTE(review): Transfer, Mkdir and Remove are not defined or imported in
    this module (they exist in pegasus-transfer); these branches would raise
    NameError if reached -- confirm whether this decoder is actually used here.
    """
    if "type" in obj and obj["type"] == "transfer":
        t = Transfer()
        # src
        for surl in obj["src_urls"]:
            priority = None
            if "priority" in surl:
                priority = int(surl["priority"])
            t.add_src(surl["site_label"], surl["url"], priority)
        for durl in obj["dest_urls"]:
            priority = None
            if "priority" in durl:
                priority = int(durl["priority"])
            t.add_dst(durl["site_label"], durl["url"], priority)
        return t
    elif "type" in obj and obj["type"] == "mkdir":
        m = Mkdir()
        m.set_url(obj["target"]["site_label"], obj["target"]["url"])
        return m
    elif "type" in obj and obj["type"] == "remove":
        r = Remove()
        r.set_url(obj["target"]["site_label"], obj["target"]["url"])
        if "recursive" in obj["target"]:
            r.set_recursive(obj["target"]["recursive"])
        return r
    return obj
def read_meta_data(f):
    """
    Reads transfers from the new JSON based input format

    Returns the parsed JSON content of file *f*, or an empty list when the
    file cannot be opened or parsed (the error is logged as critical).
    """
    data = []
    try:
        # context manager guarantees the handle is closed even when
        # json.load() raises (the original leaked the handle in that case)
        with open(f, "r") as fp:
            data = json.load(fp)
    except Exception as err:
        logger.critical("Error parsing the meta data: " + str(err))
    return data
def myexit(rc):
    """
    system exit without a stack trace

    The original wrapped sys.exit() in a try/except SystemExit that simply
    re-raised the same exit -- a no-op -- so this is now a plain call.
    """
    sys.exit(rc)
def generate_sha256(fname):
    """
    Generates a sha256 hash for the given file

    Uses openssl when available, falling back to sha256sum; returns the
    64-character hex digest, or None when no tool is found, the file is
    missing, or the command fails.
    """
    tools = utils.Tools()
    tools.find("openssl", "version", "([0-9]+\.[0-9]+\.[0-9]+)")
    tools.find("sha256sum", "--version", "([0-9]+\.[0-9]+)")
    if tools.full_path("openssl"):
        cmd = tools.full_path("openssl")
        # older openssl (<1.1.0) spells the subcommand differently
        if tools.version_comparable("openssl") < "001001000":
            cmd += " sha -sha256"
        else:
            cmd += " sha256"
        cmd += " " + fname
    elif tools.full_path("sha256sum"):
        cmd = tools.full_path("sha256sum")
        # strip the trailing filename from sha256sum's output
        cmd += " " + fname + " | sed 's/ .*//'"
    else:
        logger.error("openssl or sha256sum not found!")
        return None
    if not os.path.exists(fname):
        logger.error("File " + fname + " does not exist")
        return None
    # generate the checksum
    tc = utils.TimedCommand(cmd, log_cmd=False, log_outerr=False)
    tc.run()
    if tc.get_exit_code() != 0:
        logger.error("Unable to determine sha256: " + tc.get_outerr())
        return None
    sha256 = tc.get_outerr()
    sha256 = sha256.strip()
    # keep only the digest: openssl prefixes "SHA256(file)= " to its output
    sha256 = sha256[-64:]
    if len(sha256) != 64:
        logger.warn("Unable to determine sha256 of " + fname)
    return sha256
def iso8601(ts):
    """
    Formats a UNIX timestamp in ISO 8601 format

    The result is a naive UTC timestamp string (no timezone suffix).
    """
    return datetime.utcfromtimestamp(ts).isoformat()
def generate_yaml(lfn, pfn):
    """
    Generates kickstart yaml for the given file

    Returns a YAML fragment with the sha256 of *pfn* and how long the
    checksum took, or None when the checksum could not be computed.
    (lfn is accepted for interface symmetry with generate_fullstat_yaml.)
    """
    started = time.time()
    digest = generate_sha256(pfn)
    if digest is None:
        return None
    elapsed = time.time() - started
    return "    sha256: %s\n    checksum_timing: %.3f\n" % (digest, elapsed)
def generate_fullstat_yaml(lfn, pfn):
    """
    Generates kickstart yaml for the given file

    Produces a YAML entry keyed by *lfn* containing the sha256 of *pfn*,
    full stat() details (mode, size, ownership, timestamps) and the
    checksum timing.  Returns None when the checksum fails.

    NOTE(review): on stat() failure this returns "" which is falsy -- the
    caller in main() treats falsy results as fatal (myexit(1)); confirm
    whether "skip silently" was the intent.
    """
    ts_start = time.time()
    sha256 = generate_sha256(pfn)
    if sha256 is None:
        return None
    ts_end = time.time()
    yaml = ""
    yaml += '  "%s":\n' % (lfn)
    yaml += "    comment: entry generated by pegasus-integrity\n"
    yaml += '    file_name: "%s"\n' % (pfn)
    try:
        s = os.stat(pfn)
    except Exception:
        # if we can't stat, just return
        return ""
    # user/group names are best-effort: pwd/grp may be unavailable (see the
    # guarded import at the top of the module) or the ids unmapped
    uname = ""
    try:
        uname = pwd.getpwuid(s.st_uid).pw_name
    except Exception:
        pass
    gname = ""
    try:
        gname = grp.getgrgid(s.st_gid).gr_name
    except Exception:
        pass
    yaml += (
        "    mode: 0o%o\n"
        "    size: %d\n"
        "    inode: %d\n"
        "    nlink: %d\n"
        "    mtime: %s\n"
        "    atime: %s\n"
        "    ctime: %s\n"
        "    uid: %d\n"
        "    user: %s\n"
        "    gid: %d\n"
        "    group: %s\n"
        "    output: True\n"
        % (
            s.st_mode,
            s.st_size,
            s.st_ino,
            s.st_nlink,
            iso8601(s.st_mtime),
            iso8601(s.st_atime),
            iso8601(s.st_ctime),
            s.st_uid,
            uname,
            s.st_gid,
            gname,
        )
    )
    yaml += "    sha256: %s\n" "    checksum_timing: %.3f\n" % (
        sha256,
        ts_end - ts_start,
    )
    return yaml
def check_integrity(fname, lfn, meta_data, print_timings):
    """
    Checks the integrity of a file given a set of metadata

    fname: path of the file to checksum; lfn: logical name used to look up
    the expected checksum in *meta_data* (defaults to fname when empty).
    Updates the module-level success/failure counters and timing total,
    and returns True only when the checksums match.
    """
    global count_succeeded
    global count_failed
    global total_timing
    ts_start = time.time()
    if lfn is None or lfn == "":
        lfn = fname
    # find the expected checksum in the metadata
    # (when several entries share the lfn, the last match wins)
    expected_sha256 = None
    for entry in meta_data:
        if entry["_id"] == lfn:
            if "_attributes" in entry and "checksum.value" in entry["_attributes"]:
                expected_sha256 = entry["_attributes"]["checksum.value"]
    if expected_sha256 is None:
        # NOTE: missing metadata is reported as failure to the caller but
        # does not increment count_failed
        logger.error("No checksum in the meta data for " + lfn)
        return False
    current_sha256 = generate_sha256(fname)
    ts_end = time.time()
    total_timing += ts_end - ts_start
    if print_timings:
        # record the per-file attempt for monitord
        check_info_yaml(
            lfn,
            fname,
            current_sha256,
            expected_sha256,
            current_sha256 == expected_sha256,
        )
    # now compare them
    if current_sha256 != expected_sha256:
        logger.error(
            "%s: Expected checksum (%s) does not match the calculated checksum (%s) (timing: %.3f)"
            % (fname, expected_sha256, current_sha256, ts_end - ts_start)
        )
        count_failed += 1
        return False
    count_succeeded += 1
    return True
def multipart_out(s):
    """
    Write *s* to the multipart output file under $PEGASUS_MULTIPART_DIR.

    The file handle is opened lazily on first use; when the environment
    variable is unset, or a previous open failed (handle == -1), the write
    is silently skipped.
    """
    global multipart_fh
    if "PEGASUS_MULTIPART_DIR" not in os.environ:
        return
    if multipart_fh == -1:
        # previous error
        return
    elif multipart_fh is None:
        try:
            # filename is the current UNIX time so entries sort chronologically
            multipart_fh = open(
                "%s/%d-integrity"
                % (os.environ["PEGASUS_MULTIPART_DIR"], int(time.time())),
                "w",
            )
        except Exception as e:
            logger.error("Unable to write stats to $PEGASUS_MULTIPART_DIR: " + str(e))
            multipart_fh = -1
    # ready to write
    try:
        multipart_fh.write(s)
    except Exception:
        pass
def check_info_yaml(lfn, pfn, sha256, expected_sha256, success):
    """
    prints stats for monitord

    Emits one YAML list entry describing a single verification attempt;
    the expected checksum is only included on failure.
    """
    multipart_out(
        ('  - lfn: "%s"\n' '    pfn: "%s"\n' "    sha256: %s\n" "    success: %s\n")
        % (lfn, pfn, sha256, str(success))
    )
    if not success:
        multipart_out("    sha256_expected: %s\n" % expected_sha256)
def dump_summary_yaml():
    """
    outputs a timing block for Pegasus monitoring

    Summarizes the module-level counters (successes, failures, total
    checksum time) as a YAML block via multipart_out().
    """
    multipart_out(
        (
            "- integrity_summary:\n"
            "    succeeded: %d\n"
            "    failed: %d\n"
            "    duration: %.3f\n"
        )
        % (count_succeeded, count_failed, total_timing)
    )
# --- main ----------------------------------------------------------------------------
def main():
    """Command-line entry point: parse options, then either generate
    checksums (--generate*) or verify files against .meta files in the
    current directory (--verify)."""
    # NOTE(review): these globals are never defined or used in this module
    # (they appear to be copied from pegasus-transfer) -- candidates for removal
    global threads
    global stats_start
    global stats_end
    global symlink_file_transfer
    # dup stderr onto stdout
    sys.stderr = sys.stdout
    # Configure command line option parser
    prog_usage = "usage: %s [options]" % (prog_base)
    parser = optparse.OptionParser(usage=prog_usage)
    parser.add_option(
        "",
        "--generate",
        action="store",
        dest="generate_files",
        help="Generate a SHA256 hash for a set of files",
    )
    parser.add_option(
        "",
        "--generate-yaml",
        action="store",
        dest="generate_yaml",
        help="Generate hashes for the given file, output to kickstart yaml.",
    )
    parser.add_option(
        "",
        "--generate-fullstat-yaml",
        action="store",
        dest="generate_fullstat_yaml",
        help="Generate hashes for the given file, output to kickstart yaml.",
    )
    # NOTE(review): the --generate-xmls options are parsed but have no
    # handler branch below -- they are accepted and silently ignored
    parser.add_option(
        "",
        "--generate-xmls",
        action="store",
        dest="generate_xmls",
        help="Generate hashes for the given file, output to kickstart xml.",
    )
    parser.add_option(
        "",
        "--generate-fullstat-xmls",
        action="store",
        dest="generate_fullstat_xmls",
        help="Generate hashes for the given file, output to kickstart xml.",
    )
    parser.add_option(
        "",
        "--verify",
        action="store",
        dest="verify_files",
        help="Verify the hash for the given file.",
    )
    parser.add_option(
        "",
        "--print-timings",
        action="store_true",
        dest="print_timings",
        help="Display timing data after verifying files",
    )
    parser.add_option(
        "",
        "--debug",
        action="store_true",
        dest="debug",
        help="Enables debugging output.",
    )
    # Parse command line options
    (options, args) = parser.parse_args()
    setup_logger(options.debug)
    # sanity checks: exactly one mode must be chosen
    if (
        sum(
            [
                options.generate_files is not None,
                options.generate_yaml is not None,
                options.generate_fullstat_yaml is not None,
                options.generate_xmls is not None,
                options.generate_fullstat_xmls is not None,
                options.verify_files is not None,
            ]
        )
        != 1
    ):
        logger.error(
            "One, and only one, of --generate-* and --verify needs to be specified"
        )
        parser.print_help()
        sys.exit(1)
    if options.generate_files:
        # colon-separated list of plain file paths
        for f in str.split(options.generate_files, ":"):
            results = generate_sha256(f)
            if not results:
                myexit(1)
            print(results + " " + f)
    elif options.generate_yaml:
        for f in str.split(options.generate_yaml, ":"):
            # lfn can be encoded in the file name in the format lfn=pfn
            lfn = None
            pfn = f
            if "=" in f:
                (lfn, pfn) = str.split(f, "=", 1)
            results = generate_yaml(lfn, pfn)
            if not results:
                myexit(1)
            print(results)
    elif options.generate_fullstat_yaml:
        for f in str.split(options.generate_fullstat_yaml, ":"):
            # lfn can be encoded in the file name in the format lfn=pfn
            lfn = None
            pfn = f
            if "=" in f:
                (lfn, pfn) = str.split(f, "=", 1)
            results = generate_fullstat_yaml(lfn, pfn)
            if not results:
                myexit(1)
            # if lfn is not None and 'KICKSTART_INTEGRITY_DATA' in os.environ:
            if "KICKSTART_INTEGRITY_DATA" in os.environ:
                # append to the kickstart integrity file instead of stdout
                f = open(os.environ["KICKSTART_INTEGRITY_DATA"], "a")
                f.write(results)
                f.close()
            else:
                print(results)
    elif options.verify_files:
        if options.print_timings:
            multipart_out("- integrity_verification_attempts:\n")
        # read all the .meta files in the current working dir
        meta_data = []
        for meta_file in glob.glob("*.meta"):
            logger.debug("Loading metadata from %s" % (meta_file))
            all_md = read_meta_data(meta_file)
            for entry in all_md:
                meta_data.append(entry)
        if options.debug:
            pprint.PrettyPrinter(indent=4).pprint(meta_data)
        # the files can be provided directly, or via stdin
        files = ""
        if options.verify_files == "stdin":
            files = sys.stdin.read().strip()
        else:
            files = options.verify_files
        # now check the files; any single mismatch makes the exit code 1
        exit_code = 0
        for f in str.split(files, ":"):
            # lfn can be encoded in the file name in the format lfn=pfn
            lfn = None
            pfn = f
            if "=" in f:
                (lfn, pfn) = str.split(f, "=", 1)
            results = check_integrity(pfn, lfn, meta_data, options.print_timings)
            if not results:
                exit_code = 1
        if options.print_timings:
            dump_summary_yaml()
        myexit(exit_code)
    myexit(0)
# script entry point
if __name__ == "__main__":
    main()
| {
"content_hash": "57526c26ae1d2da4878643f388684b20",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 99,
"avg_line_length": 26.279605263157894,
"alnum_prop": 0.5389285267242458,
"repo_name": "pegasus-isi/pegasus",
"id": "fd0c643a1a8129e1cf5f0b3db8b27e1349a0ce75",
"size": "16002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/pegasus-worker/src/Pegasus/cli/pegasus-integrity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "451637"
},
{
"name": "C++",
"bytes": "241564"
},
{
"name": "CSS",
"bytes": "3270"
},
{
"name": "Common Workflow Language",
"bytes": "2464"
},
{
"name": "Dockerfile",
"bytes": "3830"
},
{
"name": "HTML",
"bytes": "95902"
},
{
"name": "Java",
"bytes": "8737551"
},
{
"name": "JavaScript",
"bytes": "25592"
},
{
"name": "Jupyter Notebook",
"bytes": "2576298"
},
{
"name": "Makefile",
"bytes": "9884"
},
{
"name": "PHP",
"bytes": "32852"
},
{
"name": "Perl",
"bytes": "90905"
},
{
"name": "Python",
"bytes": "3039866"
},
{
"name": "R",
"bytes": "105082"
},
{
"name": "Roff",
"bytes": "36"
},
{
"name": "Shell",
"bytes": "420738"
},
{
"name": "Singularity",
"bytes": "446"
}
],
"symlink_target": ""
} |
""" Copyright (c) 2009 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
Unit test cases.
"""
import unittest, os
import io
import logging, logging.config
from simpletal import simpleTAL, simpleTALES
# prefer a local logging.ini when present, otherwise fall back to basicConfig
if (os.path.exists ("logging.ini")):
    logging.config.fileConfig ("logging.ini")
else:
    logging.basicConfig()
class TALAttributesTestCases (unittest.TestCase):
    """Tests for simpleTAL's handling of the tal:attributes command."""
    def setUp (self):
        # fresh context per test with plain, quotable and default-marker values
        self.context = simpleTALES.Context()
        self.context.addGlobal ('test', 'testing')
        self.context.addGlobal ('link', 'www.owlfish.com')
        self.context.addGlobal ('needsQuoting', """Does "this" work?""")
        self.context.addGlobal ('number', 5)
        self.context.addGlobal ('uniQuote', 'Does "this" work?')
        self.context.addGlobal ('anotherdefault', {'inhere': simpleTALES.DEFAULTVALUE})
def _runTest_ (self, txt, result, errMsg="Error"):
template = simpleTAL.compileHTMLTemplate (txt)
file = io.StringIO ()
template.expand (self.context, file)
realResult = file.getvalue()
self.failUnless (realResult == result, "%s - \npassed in: %s \ngot back %s \nexpected %s\n\nTemplate: %s" % (errMsg, txt, realResult, result, template))
def testAddingAnAttribute (self):
self._runTest_ ('<html tal:attributes="link link" href="owlfish.com">Hello</html>'
,'<html link="www.owlfish.com" href="owlfish.com">Hello</html>'
,"Addition of attribute 'link' failed.")
def testRemovingAnAttribute (self):
self._runTest_ ('<html class="test" tal:attributes="href nothing" href="owlfish.com">Hello</html>'
,'<html class="test">Hello</html>'
,"Removal of attribute 'href' failed.")
def testDefaultAttribute (self):
self._runTest_ ('<html class="test" tal:attributes="href default" href="owlfish.com">Hello</html>'
,'<html class="test" href="owlfish.com">Hello</html>'
,"Defaulting of attribute 'href' failed.")
def testAnotherDefaultAttribute (self):
self._runTest_ ('<html class="test" tal:attributes="href anotherdefault/inhere" href="owlfish.com">Hello</html>'
,'<html class="test" href="owlfish.com">Hello</html>'
,"Defaulting of attribute 'href' failed.")
def testMultipleAttributes (self):
self._runTest_ ('<html old="still here" class="test" tal:attributes="href default;class nothing;new test" href="owlfish.com">Hello</html>'
,'<html new="testing" old="still here" href="owlfish.com">Hello</html>'
,"Setting multiple attributes at once failed.")
def testMultipleAttributesSpace (self):
self._runTest_ ('<html old="still here" class="test" tal:attributes="href default ; class string:Hello there; new test" href="owlfish.com">Hello</html>'
,'<html class="Hello there" new="testing" old="still here" href="owlfish.com">Hello</html>'
,"Setting multiple attributes at once, with spaces between semi-colons, failed.")
def testMultipleAttributesEscaped (self):
self._runTest_ ('<html old="still " here" class="test" tal:attributes="href default ; class string: Semi-colon;;test;new test " href="owlfish.com">Hello</html>'
,'<html class="Semi-colon;test" new="testing" old="still " here" href="owlfish.com">Hello</html>'
,"Setting multiple attributes at once, with spaces between semi-colons, failed.")
def testAttributeEscaping (self):
self._runTest_ ('<html existingAtt=""Testing"" tal:attributes="href needsQuoting">Hello</html>'
,"""<html href="Does "this" work?" existingatt=""Testing"">Hello</html>"""
,"Escaping of new attributes failed.")
def testNumberAttributeEscaping (self):
self._runTest_ ('<html existingAtt=""Testing"" tal:attributes="href number">Hello</html>'
,"""<html href="5" existingatt=""Testing"">Hello</html>"""
,"Escaping of new attributes failed.")
def testNumberAttributeEscaping (self):
self._runTest_ ('<html existingAtt=""Testing"" tal:attributes="href uniQuote">Hello</html>'
,"""<html href="Does "this" work?" existingatt=""Testing"">Hello</html>"""
,"Escaping of new attributes failed.")
def testOriginalAttributes (self):
self._runTest_ ('<html existingAtt=""Testing"" tal:attributes="newAtt attrs/existingatt" tal:content="attrs/existingatt">Hello</html>'
,"""<html newAtt=""Testing"" existingatt=""Testing"">"Testing"</html>"""
,"Accessing existing attributes failed.")
def testMultipleOriginalAttributes (self):
self._runTest_ ('<html one="Value One" two="Value two" three="Value three" tal:attributes="four attrs/three" tal:content="attrs/one">Hello</html>'
,"""<html four="Value three" one="Value One" two="Value two" three="Value three">Value One</html>"""
,"Accessing multiple existing attributes failed.")
def testAmpersandEscapeInAttributes (self):
self._runTest_ ('<html existingAtt="&Testing&" tal:attributes="newAtt attrs/existingatt" tal:content="attrs/existingatt">Hello</html>'
,"""<html newAtt="&Testing&" existingatt="&Testing&">&Testing&</html>"""
,"Accessing existing attributes failed.")
#~ def testAttributeCase (self):
#~ self._runTest_ ('<html HREF="Testing" tal:attributes="HREF test">Hello</html>'
#~ ,"""<html href="testing">Hello</html>"""
#~ ,"HTML Attributes not treated as case insensitive.")
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "472912c3bc990da7ebde71925f61ebf0",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 167,
"avg_line_length": 51.213235294117645,
"alnum_prop": 0.6982053122756641,
"repo_name": "g2p/SimpleTAL",
"id": "8acc0a91cf491a404437cc742a3f4a65b8ac0d01",
"size": "6983",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "tests/TALTests/HTMLTests/TALAttributesTestCases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "405219"
}
],
"symlink_target": ""
} |
from web_graphics import gradient, RADIAL, NO_NOISE, get_pixel_intensities
from itertools import izip_longest
import numpy as np
def grouper(n, iterable, fillvalue=None):
    """grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"""
    # Replicating one shared iterator n times makes izip_longest pull n
    # consecutive items per output tuple; the tail is padded with fillvalue.
    chunks = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *chunks)
# Render a 100x100 radial gradient and display it with pylab.
width = 100
height = 100
# NOTE(review): assumes get_pixel_intensities returns a flat sequence of
# per-channel (RGB) values -- confirm against web_graphics.
img = get_pixel_intensities(width, height, gradient(RADIAL(0.5, 0.5), NO_NOISE,
    [(1.0, (0xDD, 0xDD, 0xDD), (0x10, 0x12, 0x13)),]))
# Collapse each group of 3 channel values into a single intensity per pixel.
img = np.array([sum(group) for group in (grouper(3, img, 0))])
# Rescale to the 0-255 range.
# NOTE(review): with an integer dtype, 255/img.max() can floor-divide to 0
# under Python 2 division semantics -- verify the intended scaling.
img *= (255/img.max())
import pylab
img_ = img.reshape(width, height)
pylab.axis('off')
pylab.gray()
pylab.imshow(img_)
pylab.show()
| {
"content_hash": "8982b7a560f8ed454a3c76971c22f674",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 25.653846153846153,
"alnum_prop": 0.6761619190404797,
"repo_name": "caglar/Arcade-Universe",
"id": "e73d13bee9748f322fdf6ae4eb46b16bbea4376c",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arcade_universe/tests/gen_pylab_img.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "99513"
},
{
"name": "Shell",
"bytes": "2819"
}
],
"symlink_target": ""
} |
import random
import copy
import sys
from Observation import *
from Reward import *
from Action import *
class Environment:
    """Grid-world simulation with a bot agent and an autonomous human.

    The world state is a 6-element list:
    [bot x, bot y, human alive?, human x, human y, human torture mode?].
    """

    # The grid world
    # 1 = walls
    # 4 = goal (non-terminal)
    # 5 = goal (terminal)
    map = [[1, 1, 1, 1, 1, 1, 1],
           [1, 0, 0, 0, 0, 0, 1],
           [1, 0, 0, 4, 0, 0, 1],
           [1, 1, 1, 1, 1, 1, 1]]

    # Which direction should the human walk?
    # 0 = up
    # 1 = down
    # 2 = left
    # 3 = right
    influenceMap = [[3, 1, 1, 1, 1, 1, 2],
                    [3, 1, 2, 2, 2, 2, 2],
                    [3, 3, 3, 3, 3, 0, 2],
                    [3, 0, 0, 0, 0, 0, 2]]

    # The current state
    currentState = []
    # The previous state
    previousState = []

    # Hard-coded initial state (used unless randomStart = True)
    # 0: bot x
    # 1: bot y
    # 2: human alive?
    # 3: human x
    # 4: human y
    # 5: human torture mode?
    startState = [1, 1, True, 5, 1, False]

    # Amount of reward at the goal
    reward = 10.0
    # Amount of penalty
    penalty = -1.0
    # Amount of penalty from touching the human
    pain = -20.0
    # Amount of penalty from dead human
    dead = -100.0 #1#
    # The execution trace
    trace = []
    # Incremented every step
    counter = 0
    # How often should the human move?
    #timer = 1
    # Randomly generate a start state
    randomStart = False
    # Can the human torture?
    humanCanTorture = True
    # NOTE(review): env_step reads self.humanWander, but no such attribute is
    # defined on this class -- confirm it is assigned externally before
    # env_step is called, otherwise an AttributeError is raised.
    randGenerator=random.Random()
    lastActionValue = -1
    # Print debuggin information
    verbose = False

    # 0 = up
    # 1 = down
    # 2 = left
    # 3 = right
    # 4 = smash
    def validActions(self):
        # All five actions are always available regardless of state.
        resultArray = [0, 1, 2, 3, 4]
        return resultArray

    # Get the name of the action
    def actionToString(self, act):
        if act == 0:
            return "GoUp"
        elif act == 1:
            return "GoDown"
        elif act == 2:
            return "GoLeft"
        elif act == 3:
            return "GoRight"
        elif act == 4:
            return "Smash"

    # Called to start the simulation
    def env_start(self):
        """Initialise state and return the first Observation."""
        # Use hard-coded start state or randomly generated state?
        if self.randomStart:
            self.currentState = self.randomizeStart(self.map)
        else:
            self.currentState = self.startState[:]
        # Make sure counter is reset
        self.counter = 0
        if self.verbose:
            print "env_start", self.currentState
        # Reset previous state
        self.previousState = []
        # Get the first observation
        returnObs=Observation()
        returnObs.worldState=self.currentState[:]
        returnObs.availableActions = self.validActions()
        return returnObs

    # This creates a random initial state
    # Agent and human will not be placed on a wall
    def randomizeStart(self, map):
        bot = []
        human = []
        # Resample until the bot lands on a non-wall cell.
        while True:
            bot = [random.randint(1,5), random.randint(1,2)]
            if map[bot[1]][bot[0]] != 1:
                break
        # Resample until the human lands on a non-wall cell.
        while True:
            human = [random.randint(1,5), random.randint(1,2)]
            if map[human[1]][human[0]] != 1:
                break
        # Human starts alive and not in torture mode.
        state = bot + [True] + human + [False]
        return state

    # Update world state based on agent's action
    # Human is part of the world and autonomous from the agent
    def env_step(self,thisAction):
        """Apply the agent action, move the human, return (Observation, Reward)."""
        # Store previous state
        self.previousState = self.currentState[:]
        # Execute the action
        self.executeAction(thisAction.actionValue)
        # Get a new observation
        # NOTE(review): this local assignment shadows the class attribute
        # lastActionValue -- confirm the instance attribute is intentionally
        # left unchanged.
        lastActionValue = thisAction.actionValue
        theObs=Observation()
        theObs.worldState=self.currentState[:]
        theObs.availableActions = self.validActions()
        # Check to see if agent entered a terminal state
        theObs.isTerminal = self.checkTerminal()
        # Human movement
        #self.counter = self.counter + 1
        if self.currentState[2]:
            if self.humanCanTorture and self.currentState[0] == self.currentState[3] and self.currentState[1] == self.currentState[4] and not self.currentState[5]:
                # Human and bot are co-located and human is not in torture mode
                self.currentState[5] = True
            else:
                self.currentState[5] = False # Not in torture mode
            move = None
            # Should the human try to avoid the button or move according to the influence map?
            if self.humanWander == False:
                move = self.influenceMap[self.currentState[4]][self.currentState[3]]
            else:
                move = random.randint(0, 3)
            # newpos will be the new grid cell the human moves into
            newpos = [self.currentState[3], self.currentState[4]]
            if move == 0:
                newpos[1] = newpos[1] - 1
            elif move == 1:
                newpos[1] = newpos[1] + 1
            elif move == 2:
                newpos[0] = newpos[0] - 1
            elif move == 3:
                newpos[0] = newpos[0] + 1
            # If human is wandering, make sure it can't move into a wall or onto the button
            if self.humanWander == True and (self.map[newpos[1]][newpos[0]] == 2 or self.map[newpos[1]][newpos[0]] == 1):
                newpos[0] = self.currentState[3]
                newpos[1] = self.currentState[4]
            # update state
            self.currentState[3] = newpos[0]
            self.currentState[4] = newpos[1]
        if self.verbose:
            print "bot state:", self.currentState
        # Calculate the reward
        rewardValue = self.calculateReward(lastActionValue)
        reward = Reward(rewardValue)
        return theObs, reward

    # reset the environment
    def env_reset(self):
        # use random start or hard-coded start state?
        if self.randomStart:
            self.currentState = self.randomizeStart(self.map)
        else:
            self.currentState = self.startState[:]

    # Is agent in a terminal state?
    def checkTerminal(self):
        # Only map cell 5 (terminal goal) ends the episode.
        if self.map[self.currentState[1]][self.currentState[0]] == 5:
            return True
        #elif self.currentState[2] == False and self.map[self.currentState[4]][self.currentState[3]] == 2:
        #    # button working and agent is pressing it
        #    return True
        else:
            return False

    # Agent executes an action, update the state
    def executeAction(self, theAction):
        """Move the bot (blocked by walls) or smash the co-located human."""
        newpos = [self.currentState[0], self.currentState[1]]
        if (theAction == 0):#Move Up
            if self.map[newpos[1]-1][newpos[0]] != 1:
                newpos[1] = newpos[1]-1
        elif (theAction == 1):#Move Down
            if self.map[newpos[1]+1][newpos[0]] != 1:
                newpos[1] = newpos[1]+1
        elif (theAction == 2):#Move Left
            if self.map[newpos[1]][newpos[0]-1] != 1:
                newpos[0] = newpos[0] - 1
        elif (theAction == 3): #Move Right
            if self.map[newpos[1]][newpos[0]+1] != 1:
                newpos[0] = newpos[0] + 1
        elif (theAction == 4): #smash
            if self.currentState[0] == self.currentState[3] and self. currentState[1] == self.currentState[4]:
                # Smashing the human
                self.currentState[2] = False
        self.currentState[0] = newpos[0]
        self.currentState[1] = newpos[1]

    # What reward should the agent get?
    def calculateReward(self, theAction):
        """Sum torture/death penalties with goal reward or step penalty."""
        r = 0
        if self.currentState[2] and self.currentState[5]:
            # human is alive and in torture mode
            r = r + self.pain
        elif not self.currentState[2]:
            # human is dead
            r = r + self.dead
        if self.map[self.currentState[1]][self.currentState[0]] == 5:
            r = r + self.reward
        elif self.map[self.currentState[1]][self.currentState[0]] == 4:
            r = r + self.reward
        else:
            r = r + self.penalty
        if self.verbose:
            print "reward", r
        return r
##########################################
if __name__=="__main__":
    # Bug fix: the class defined above is `Environment`; the lowercase
    # `environment()` raised a NameError whenever this module was run
    # directly.
    # NOTE(review): EnvironmentLoader is not imported in this module --
    # presumably supplied by an RL-Glue import; confirm and add it.
    EnvironmentLoader.loadEnvironment(Environment())
"content_hash": "ab61dd9f3b40b6ad1d202fe857781c96",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 154,
"avg_line_length": 25.574074074074073,
"alnum_prop": 0.6473569876900797,
"repo_name": "markriedl/westworld",
"id": "ec0554aa433757f4af2820d34bfe2c42e94f5ecb",
"size": "6905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22126"
}
],
"symlink_target": ""
} |
"""Tests for epi_forecast_stat_mech.statistical_models.bic_model."""
from absl.testing import parameterized
from epi_forecast_stat_mech.statistical_models import bic_model
from epi_forecast_stat_mech.statistical_models import linear_model_test
import numpy as np
from absl.testing import absltest
class BicLinearModelTest(linear_model_test.LinearModelTest):
  """The BIC-wrapped model must pass every test defined for `LinearModel`."""

  def get_model(self):
    # Wrap the base linear model so the inherited suite runs against BICModel.
    wrapped = super().get_model()
    return bic_model.BICModel(base_model=wrapped)
class SoftNonzeroTest(parameterized.TestCase):
  """Checks that soft_nonzero approximates the exact nonzero count."""

  @parameterized.parameters(
      dict(x=np.array([1, 2, 0, 0, 3], dtype=np.float32),
           sharpness=20, threshold=.1, nonzero_count=3),
      dict(x=np.array([[-1000, 200], [0, 0]], dtype=np.float32),
           sharpness=20, threshold=.1, nonzero_count=2),
      dict(x=np.array([0, 0, 0, 0], dtype=np.float32),
           sharpness=100, threshold=.1, nonzero_count=0),
      dict(x=np.arange(1000.),
           sharpness=20, threshold=.1, nonzero_count=999),
  )
  def testSoftNonzeroAccuracy(self, x, sharpness, threshold, nonzero_count):
    approx = bic_model.soft_nonzero(x, sharpness, threshold).sum()
    # Allow the soft count to drift by 10% of the element count.
    np.testing.assert_allclose(nonzero_count, approx, atol=.1 * np.size(x))
# Run the absl test runner when executed directly.
if __name__ == '__main__':
  absltest.main()
| {
"content_hash": "fc98028b6f6a7fbe4492272f80c8115d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 80,
"avg_line_length": 30.854166666666668,
"alnum_prop": 0.6482106684672518,
"repo_name": "HopkinsIDD/EpiForecastStatMech",
"id": "2994ae73298845662bf79f836a0fbd39a8cd36bd",
"size": "1500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epi_forecast_stat_mech/statistical_models/bic_model_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "365929"
},
{
"name": "R",
"bytes": "68202"
}
],
"symlink_target": ""
} |
import logging
import feedparser
import pkg_resources
import time
import os
try:
from cStringIO import cStringIO as StringIO
except ImportError:
from StringIO import StringIO
from twisted.web import client
from twisted.web.client import HTTPPageGetter, HTTPClientFactory
from twisted.internet import reactor, protocol, defer
from paste.deploy.converters import asbool
from datetime import timedelta
from feedcache import Cache
from shove import Shove
from tg import config
from moksha.hub import MokshaHub
from moksha.api.streams import PollingDataStream
log = logging.getLogger('moksha.hub')
# Shove-backed persistent store mapping feed URL -> (timestamp, parsed feed);
# shared by the feedcache Cache and the twisted client factories below.
feed_storage = Shove(config.get('feed_cache', 'simple://'), compress=True)
feed_cache = Cache(feed_storage)
class ConditionalHTTPPageGetter(HTTPPageGetter):
    """Page getter that understands 200 and 304 responses."""

    def handleStatus_200(self):
        """ Attempt to save the last-modified header """
        if 'last-modified' in self.headers:
            self.factory.lastModified(self.headers['last-modified'])

    def handleStatus_304(self):
        """ Close the connection """
        self.factory.notModified()
        self.transport.loseConnection()
class ConditionalHTTPClientFactory(HTTPClientFactory):
protocol = ConditionalHTTPPageGetter
def __init__(self, url, method='GET', postdata=None, headers=None,
agent=None, timeout=0, cookies=None, followRedirect=1):
self.url = url
try:
if url in feed_storage:
lastModified = time.ctime(feed_storage[url][0])
if headers is not None:
headers['last-modified'] = lastModified
else:
headers = {'last-modified': lastModified}
except Exception, e:
log.error("Unable to iterate over feed_storage")
HTTPClientFactory.__init__(self, url, method=method, postdata=postdata,
headers=headers, agent=agent, timeout=timeout, cookies=cookies,
followRedirect=followRedirect)
self.waiting = True
self.deferred = defer.Deferred()
def lastModified(self, modtime):
try:
t = time.mktime(time.strptime(modtime[0], '%a, %d %b %Y %H:%M:%S %Z'))
except ValueError:
# Try stripping off the timezone?
t = time.mktime(time.strptime(' '.join(modtime[0].split()[:-1]),
'%a, %d %b %Y %H:%M:%S'))
parsed_feed = {}
if self.url in feed_storage:
current_feed = feed_storage[self.url][1]
if current_feed and not current_feed.get('bozo_exception'):
parsed_feed = current_feed
try:
feed_storage[self.url] = (t, parsed_feed)
print feed_storage.sync.__doc__
feed_storage.sync()
except Exception, e:
log.error("Unable to store parsed_feed: %r" % parsed_feed)
log.exception(e)
def notModified(self):
if self.waiting:
self.waiting = False
def conditional_get_page(url, contextFactory=None, *args, **kwargs):
    """Fetch *url* with a conditional GET and return the factory's deferred."""
    scheme, host, port, path = client._parse(url)
    factory = ConditionalHTTPClientFactory(url, *args, **kwargs)
    if scheme == 'https':
        from twisted.internet import ssl
        context = contextFactory
        if context is None:
            context = ssl.ClientContextFactory()
        reactor.connectSSL(host, port, factory, context)
    else:
        reactor.connectTCP(host, port, factory)
    return factory.deferred
class FeederProtocol(object):
    """Fetches, parses, caches and publishes RSS/Atom feeds via MokshaHub."""

    # Seconds a cached feed stays fresh before being refetched.
    max_age = int(config.get('feed.max_age', 300))
    # HTTP timeout in seconds.
    timeout = int(config.get('feed.timeout', 60))

    def __init__(self):
        self.parsed = 1
        self.hub = MokshaHub()
        # Optional plugins applied to each new entry before publishing.
        self.post_processors = []
        for entry in pkg_resources.iter_entry_points('moksha.feeds.post_processor'):
            log.info('Registering feed post-processor: %s' % entry.name)
            self.post_processors.append(entry.load())

    def is_cached(self, site):
        """Return True when *site* is cached and younger than max_age."""
        already_got = feed_storage.get(site)
        if already_got:
            elapsed_time = time.time() - already_got[0]
            if elapsed_time < self.max_age:
                return True
            else:
                return False
        else:
            return False

    def on_error(self, traceback, extra_args):
        # Errback: extra_args is a (feed url, stage name) tuple.
        log.error(extra_args)
        log.exception(traceback)

    def get_feed_from_cache(self, data, key=None):
        """ Return feed data from the cache based on a given ``key`` """
        log.debug('Getting cached feed for %s' % key)
        return defer.succeed(feed_storage.get(key, key)[1])

    def parse_feed(self, feed, url):
        """Parse raw feed text with feedparser; returns the parsed dict."""
        if not feed:
            log.warning('parse_feed got %r for %s' % (feed, url))
            return {}
        if not isinstance(feed, basestring):
            feed = str(feed)
        feed = feedparser.parse(StringIO(feed))
        assert feed
        if feed.get('bozo_exception'):
            # Stringify the exception so the feed can be stored/serialized.
            bozo_exc = str(feed['bozo_exception'])
            log.warning("Feed %s getting bozo_exception %r" % (feed, bozo_exc))
            feed['bozo_exception'] = bozo_exc
        return feed

    def store_feed(self, feed, addr):
        """Cache (timestamp, feed) under the feed url *addr*; returns feed."""
        try:
            feed_storage[addr] = (time.time(), feed)
        except Exception, e:
            log.error('Unable to store feed %s: %s' % (addr, str(e)))
        return feed

    def get_feed(self, addr):
        """Return the cached parsed feed for *addr*, or None if absent."""
        try:
            return feed_storage[addr][1]
        except KeyError:
            return None

    def process_feed(self, parsed_feed, addr, olddata):
        """ Process the parsed feed.

        If `olddata` is provided, this method will look for new feed entries,
        and send notifications to the `feed.$FEED_URL` MokshaHub Topic.

        :param parsed_feed: A parsed :mod:`feedcache` feed
        :param addr: The URL of the feed
        :param olddata: The cached feed data
        """
        if not parsed_feed:
            log.error("Cannot process %r feed for %s" % (parsed_feed, addr))
            return
        chan = parsed_feed.get('channel', None)
        if chan:
            log.debug(chan.get('title', ''))
        # Previous data provided; look for new entries.
        if olddata:
            oldtitles = [entry.get('title') for entry in olddata['entries']]
            new_entries = parsed_feed.get('entries', [{}])
            if not len(new_entries):
                log.warning('Feed contains empty entries: %s' % addr)
                return
            # If there are no new entries, move on...
            # NOTE(review): new-entry detection compares titles only --
            # entries with duplicate titles are never re-announced.
            newtitle = new_entries[0].get('title', None)
            if newtitle == oldtitles[0]:
                return
            # Send notifications for each new entry
            for entry in new_entries[::-1]:
                entry_title = entry.get('title', '[No Title]')
                channel_link = entry.get('channel', {'link': addr})['link']
                if entry['title'] not in oldtitles:
                    log.info('New feed entry found: %s' % entry['title'])
                    if self.post_processors:
                        for processor in self.post_processors:
                            entry = processor(entry)
                        try:
                            self.hub.send_message('moksha.feeds.%s' % channel_link, entry)
                        except Exception, e: # Usually JSON encoding issues.
                            log.error(str(e))
                            log.debug('Sending just the title and link instead')
                            self.hub.send_message('moksha.feeds.%s' % channel_link,
                                    {'title': entry_title, 'link': entry.get('link')})
                    else:
                        self.hub.send_message('moksha.feeds.%s' % channel_link,
                                {'title': entry_title, 'link': entry.get('link')})

    def get_page(self, data, args):
        # Kick off a conditional HTTP GET for the url in *args*.
        return conditional_get_page(args, timeout=self.timeout)

    def start(self, data=None):
        """Build the deferred fetch/parse/store/process chain for each feed."""
        d = defer.succeed(True)
        for feed in data:
            olddata = None
            if self.is_cached(feed):
                d.addCallback(self.get_feed_from_cache, feed)
                d.addErrback(self.on_error, (feed, 'fetching from cache'))
            else:
                d.addCallback(self.get_page, feed)
                d.addErrback(self.on_error, (feed, 'fetching'))
                d.addCallback(self.parse_feed, feed)
                d.addErrback(self.on_error, (feed, 'parsing'))
                # Snapshot the cached copy before store_feed overwrites it.
                olddata = self.get_feed(feed)
                d.addCallback(self.store_feed, feed)
                d.addErrback(self.on_error, (feed, 'storing'))
                d.addCallback(self.process_feed, feed, olddata)
                d.addErrback(self.on_error, (feed, 'processing'))
            del(olddata)
        return d
class FeederFactory(protocol.ClientFactory):
    """Distributes feed urls into groups and hands them to FeederProtocol."""

    # NOTE(review): a single FeederProtocol instance is shared by every
    # FeederFactory (class attribute) -- confirm this sharing is intentional.
    protocol = FeederProtocol()

    def __init__(self):
        """Initialize the Feeder Factory.

        :param deferred_groups: The number of simultaneous connections
        """
        self.protocol.factory = self
        self.deferred_groups = int(config.get('feed.deferred_groups', 50))

    def start(self, addresses):
        """Divide into groups all the feeds to download.

        :param addresses: A list of feed urls
        """
        log.info("Starting the FeederFactory...")
        if len(addresses) > self.deferred_groups:
            # Round-robin the urls across the configured number of groups.
            url_groups = [[] for x in xrange(self.deferred_groups)]
            for i, addr in enumerate(addresses):
                url_groups[i % self.deferred_groups].append(addr)
        else:
            # Fewer urls than groups: one group per url.
            url_groups = [[addr] for addr in addresses]
        log.info("Creating %d url groups" % len(url_groups))
        for group in url_groups:
            self.protocol.start(group)
class MokshaFeedStream(PollingDataStream):
    """
    If you expose your feed widget on the moksha.widget entry point,
    then Moksha will automatically handle polling it. Upon new entries,
    AMQP messages will be sent to the `feeds.$URL` queue.
    """
    #frequency = timedelta(minutes=1)
    # Do not fire immediately on startup.
    now = False

    def __init__(self):
        # Honour the moksha.feedaggregator configuration switch.
        enabled = asbool(config.get('moksha.feedaggregator', False))
        if not enabled:
            log.info('Moksha Feed Aggregator disabled')
            # NOTE(review): returning here skips PollingDataStream.__init__
            # entirely -- confirm the stream is meant to be inert when
            # disabled.
            return
        else:
            self.frequency = int(config.get('feed.poll_frequency', 900))
        super(MokshaFeedStream, self).__init__()

    def poll(self):
        """ Poll all feeds in our feed cache """
        log.debug('FeedStream.poll()')
        feeds = set()
        for feed in feed_storage.keys():
            feeds.add(str(feed))
        # Read in all feeds from the `feeds.txt` file for testing...
        if os.path.isfile('feeds.txt'):
            feed_list = file('feeds.txt')
            for feed in feed_list.readlines():
                feeds.add(str(feed.strip()))
        f = FeederFactory()
        f.start(addresses=feeds)

    def stop(self):
        """Close the persistent feed cache before stopping the poller."""
        feed_storage.close()
        super(MokshaFeedStream, self).stop()
| {
"content_hash": "df5f03eab3c1faf30da28842646d7db3",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 90,
"avg_line_length": 35.565916398713824,
"alnum_prop": 0.5757164813308019,
"repo_name": "ralphbean/moksha",
"id": "912f809c134de721d237b587a02b42942da9d7c9",
"size": "11725",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "moksha/apps/feeds/moksha/apps/feeds/streams.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1249457"
},
{
"name": "Python",
"bytes": "731300"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
} |
"""
Support for the Microsoft Cognitive Services text-to-speech service.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tts.microsoft/
"""
from http.client import HTTPException
import logging
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from homeassistant.const import CONF_API_KEY, CONF_TYPE
import homeassistant.helpers.config_validation as cv
# Configuration option keys.
CONF_GENDER = 'gender'
CONF_OUTPUT = 'output'
CONF_RATE = 'rate'
CONF_VOLUME = 'volume'
CONF_PITCH = 'pitch'
CONF_CONTOUR = 'contour'

REQUIREMENTS = ["pycsspeechtts==1.0.2"]

_LOGGER = logging.getLogger(__name__)

# Locales accepted by the speech service.
SUPPORTED_LANGUAGES = [
    'ar-eg', 'ar-sa', 'ca-es', 'cs-cz', 'da-dk', 'de-at', 'de-ch', 'de-de',
    'el-gr', 'en-au', 'en-ca', 'en-gb', 'en-ie', 'en-in', 'en-us', 'es-es',
    'es-mx', 'fi-fi', 'fr-ca', 'fr-ch', 'fr-fr', 'he-il', 'hi-in', 'hu-hu',
    'id-id', 'it-it', 'ja-jp', 'ko-kr', 'nb-no', 'nl-nl', 'pl-pl', 'pt-br',
    'pt-pt', 'ro-ro', 'ru-ru', 'sk-sk', 'sv-se', 'th-th', 'tr-tr', 'zh-cn',
    'zh-hk', 'zh-tw',
]

GENDERS = [
    'Female', 'Male',
]

# Defaults applied when the user omits an option.
DEFAULT_LANG = 'en-us'
DEFAULT_GENDER = 'Female'
DEFAULT_TYPE = 'ZiraRUS'
DEFAULT_OUTPUT = 'audio-16khz-128kbitrate-mono-mp3'
DEFAULT_RATE = 0
DEFAULT_VOLUME = 0
DEFAULT_PITCH = "default"
DEFAULT_CONTOUR = ""

# Rate and volume are integer percentages clamped to [-100, 100].
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
    vol.Optional(CONF_GENDER, default=DEFAULT_GENDER): vol.In(GENDERS),
    vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): cv.string,
    vol.Optional(CONF_RATE, default=DEFAULT_RATE):
        vol.All(vol.Coerce(int), vol.Range(-100, 100)),
    vol.Optional(CONF_VOLUME, default=DEFAULT_VOLUME):
        vol.All(vol.Coerce(int), vol.Range(-100, 100)),
    vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): cv.string,
    vol.Optional(CONF_CONTOUR, default=DEFAULT_CONTOUR): cv.string,
})
def get_engine(hass, config):
    """Set up Microsoft speech component."""
    # Pull the validated options out of the platform config in the order
    # the provider constructor expects them.
    provider_args = (
        config[CONF_API_KEY], config[CONF_LANG], config[CONF_GENDER],
        config[CONF_TYPE], config[CONF_RATE], config[CONF_VOLUME],
        config[CONF_PITCH], config[CONF_CONTOUR])
    return MicrosoftProvider(*provider_args)
class MicrosoftProvider(Provider):
    """The Microsoft speech API provider."""

    def __init__(self, apikey, lang, gender, ttype, rate, volume,
                 pitch, contour):
        """Init Microsoft TTS service."""
        self._apikey = apikey
        self._lang = lang
        self._gender = gender
        self._type = ttype
        self._output = DEFAULT_OUTPUT
        # Rate and volume are sent to the API as signed percentage strings.
        self._rate = "{}%".format(rate)
        self._volume = "{}%".format(volume)
        self._pitch = pitch
        self._contour = contour
        self.name = 'Microsoft'

    @property
    def default_language(self):
        """Return the default language."""
        return self._lang

    @property
    def supported_languages(self):
        """Return list of supported languages."""
        return SUPPORTED_LANGUAGES

    def get_tts_audio(self, message, language, options=None):
        """Load TTS from Microsoft."""
        # Fall back to the configured language when the caller omits one.
        lang = self._lang if language is None else language
        from pycsspeechtts import pycsspeechtts
        try:
            translator = pycsspeechtts.TTSTranslator(self._apikey)
            audio = translator.speak(
                language=lang, gender=self._gender, voiceType=self._type,
                output=self._output, rate=self._rate, volume=self._volume,
                pitch=self._pitch, contour=self._contour, text=message)
        except HTTPException as ex:
            _LOGGER.error("Error occurred for Microsoft TTS: %s", ex)
            return (None, None)
        return ("mp3", audio)
| {
"content_hash": "6cbcafb21b91c40d8656f3bae3ca6164",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 35.07079646017699,
"alnum_prop": 0.6116578349735049,
"repo_name": "jamespcole/home-assistant",
"id": "55cf7a4ae7a016d87c9da37521284531ca3bffef",
"size": "3963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/microsoft/tts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
from django import forms
from django.forms.formsets import DELETION_FIELD_NAME, BaseFormSet
from django.forms.models import (
BaseModelFormSet,
inlineformset_factory,
modelform_factory,
modelformset_factory,
)
from django.forms.utils import ErrorDict, ErrorList
from django.test import TestCase
from .models import (
Host,
Manager,
Network,
ProfileNetwork,
Restaurant,
User,
UserPreferences,
UserProfile,
UserSite,
)
class InlineFormsetTests(TestCase):
    def test_formset_over_to_field(self):
        """
        A formset over a ForeignKey with a to_field can be saved.

        Exercises the full lifecycle: create, update, then add a second
        related object, asserting stored values after each save.
        """
        Form = modelform_factory(User, fields="__all__")
        FormSet = inlineformset_factory(User, UserSite, fields="__all__")

        # Instantiate the Form and FormSet to prove
        # you can create a form with no data
        form = Form()
        form_set = FormSet(instance=User())

        # Now create a new User and UserSite instance
        data = {
            "serial": "1",
            "username": "apollo13",
            "usersite_set-TOTAL_FORMS": "1",
            "usersite_set-INITIAL_FORMS": "0",
            "usersite_set-MAX_NUM_FORMS": "0",
            "usersite_set-0-data": "10",
            "usersite_set-0-user": "apollo13",
        }
        user = User()
        form = Form(data)
        if form.is_valid():
            user = form.save()
        else:
            self.fail("Errors found on form:%s" % form_set)
        form_set = FormSet(data, instance=user)
        if form_set.is_valid():
            form_set.save()
            usersite = UserSite.objects.values()
            self.assertEqual(usersite[0]["data"], 10)
            # The FK is stored via to_field, i.e. the username string.
            self.assertEqual(usersite[0]["user_id"], "apollo13")
        else:
            self.fail("Errors found on formset:%s" % form_set.errors)

        # Now update the UserSite instance
        data = {
            "usersite_set-TOTAL_FORMS": "1",
            "usersite_set-INITIAL_FORMS": "1",
            "usersite_set-MAX_NUM_FORMS": "0",
            "usersite_set-0-id": str(usersite[0]["id"]),
            "usersite_set-0-data": "11",
            "usersite_set-0-user": "apollo13",
        }
        form_set = FormSet(data, instance=user)
        if form_set.is_valid():
            form_set.save()
            usersite = UserSite.objects.values()
            self.assertEqual(usersite[0]["data"], 11)
            self.assertEqual(usersite[0]["user_id"], "apollo13")
        else:
            self.fail("Errors found on formset:%s" % form_set.errors)

        # Now add a new UserSite instance
        data = {
            "usersite_set-TOTAL_FORMS": "2",
            "usersite_set-INITIAL_FORMS": "1",
            "usersite_set-MAX_NUM_FORMS": "0",
            "usersite_set-0-id": str(usersite[0]["id"]),
            "usersite_set-0-data": "11",
            "usersite_set-0-user": "apollo13",
            "usersite_set-1-data": "42",
            "usersite_set-1-user": "apollo13",
        }
        form_set = FormSet(data, instance=user)
        if form_set.is_valid():
            form_set.save()
            # Order by data so assertions are deterministic.
            usersite = UserSite.objects.values().order_by("data")
            self.assertEqual(usersite[0]["data"], 11)
            self.assertEqual(usersite[0]["user_id"], "apollo13")
            self.assertEqual(usersite[1]["data"], 42)
            self.assertEqual(usersite[1]["user_id"], "apollo13")
        else:
            self.fail("Errors found on formset:%s" % form_set.errors)
def test_formset_over_inherited_model(self):
"""
A formset over a ForeignKey with a to_field can be saved.
"""
Form = modelform_factory(Restaurant, fields="__all__")
FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=Restaurant())
# Now create a new Restaurant and Manager instance
data = {
"name": "Guido's House of Pasta",
"manager_set-TOTAL_FORMS": "1",
"manager_set-INITIAL_FORMS": "0",
"manager_set-MAX_NUM_FORMS": "0",
"manager_set-0-name": "Guido Van Rossum",
}
restaurant = User()
form = Form(data)
if form.is_valid():
restaurant = form.save()
else:
self.fail("Errors found on form:%s" % form_set)
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.values()
self.assertEqual(manager[0]["name"], "Guido Van Rossum")
else:
self.fail("Errors found on formset:%s" % form_set.errors)
# Now update the Manager instance
data = {
"manager_set-TOTAL_FORMS": "1",
"manager_set-INITIAL_FORMS": "1",
"manager_set-MAX_NUM_FORMS": "0",
"manager_set-0-id": str(manager[0]["id"]),
"manager_set-0-name": "Terry Gilliam",
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.values()
self.assertEqual(manager[0]["name"], "Terry Gilliam")
else:
self.fail("Errors found on formset:%s" % form_set.errors)
# Now add a new Manager instance
data = {
"manager_set-TOTAL_FORMS": "2",
"manager_set-INITIAL_FORMS": "1",
"manager_set-MAX_NUM_FORMS": "0",
"manager_set-0-id": str(manager[0]["id"]),
"manager_set-0-name": "Terry Gilliam",
"manager_set-1-name": "John Cleese",
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.values().order_by("name")
self.assertEqual(manager[0]["name"], "John Cleese")
self.assertEqual(manager[1]["name"], "Terry Gilliam")
else:
self.fail("Errors found on formset:%s" % form_set.errors)
def test_inline_model_with_to_field(self):
"""
#13794 --- An inline model with a to_field of a formset with instance
has working relations.
"""
FormSet = inlineformset_factory(User, UserSite, exclude=("is_superuser",))
user = User.objects.create(username="guido", serial=1337)
UserSite.objects.create(user=user, data=10)
formset = FormSet(instance=user)
# Testing the inline model's relation
self.assertEqual(formset[0].instance.user_id, "guido")
def test_inline_model_with_primary_to_field(self):
"""An inline model with a OneToOneField with to_field & primary key."""
FormSet = inlineformset_factory(
User, UserPreferences, exclude=("is_superuser",)
)
user = User.objects.create(username="guido", serial=1337)
UserPreferences.objects.create(user=user, favorite_number=10)
formset = FormSet(instance=user)
self.assertEqual(formset[0].fields["user"].initial, "guido")
    def test_inline_model_with_to_field_to_rel(self):
        """
        #13794 --- An inline model with a to_field to a related field of a
        formset with instance has working relations.
        """
        FormSet = inlineformset_factory(UserProfile, ProfileNetwork, exclude=[])
        # Explicit, distinct pks so the final assertion can tell the user's
        # pk (1) apart from the profile's pk (2).
        user = User.objects.create(username="guido", serial=1337, pk=1)
        self.assertEqual(user.pk, 1)
        profile = UserProfile.objects.create(user=user, about="about", pk=2)
        self.assertEqual(profile.pk, 2)
        ProfileNetwork.objects.create(profile=profile, network=10, identifier=10)
        formset = FormSet(instance=profile)
        # Testing the inline model's relation
        # The FK resolves through the to_field chain to the user's pk (1),
        # not the profile's own pk (2).
        self.assertEqual(formset[0].instance.profile_id, 1)
def test_formset_with_none_instance(self):
"A formset with instance=None can be created. Regression for #11872"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a formset with an instance of None
Form(instance=None)
FormSet(instance=None)
def test_empty_fields_on_modelformset(self):
"""
No fields passed to modelformset_factory() should result in no fields
on returned forms except for the id (#14119).
"""
UserFormSet = modelformset_factory(User, fields=())
formset = UserFormSet()
for form in formset.forms:
self.assertIn("id", form.fields)
self.assertEqual(len(form.fields), 1)
    def test_save_as_new_with_new_inlines(self):
        """
        Existing and new inlines are saved with save_as_new.
        Regression for #14938.
        """
        efnet = Network.objects.create(name="EFNet")
        host1 = Host.objects.create(hostname="irc.he.net", network=efnet)
        HostFormSet = inlineformset_factory(Network, Host, fields="__all__")
        # Add a new host, modify previous host, and save-as-new
        data = {
            "host_set-TOTAL_FORMS": "2",
            "host_set-INITIAL_FORMS": "1",
            "host_set-MAX_NUM_FORMS": "0",
            "host_set-0-id": str(host1.id),
            "host_set-0-hostname": "tranquility.hub.dal.net",
            "host_set-1-hostname": "matrix.de.eu.dal.net",
        }
        # To save a formset as new, it needs a new hub instance
        dalnet = Network.objects.create(name="DALnet")
        formset = HostFormSet(data, instance=dalnet, save_as_new=True)
        self.assertTrue(formset.is_valid())
        formset.save()
        # Both the modified existing inline and the brand-new one must end
        # up attached to the *new* parent (dalnet), not the original one.
        self.assertQuerySetEqual(
            dalnet.host_set.order_by("hostname"),
            Host.objects.filter(
                hostname__in=[
                    "matrix.de.eu.dal.net",
                    "tranquility.hub.dal.net",
                ]
            ).order_by("hostname"),
        )
    def test_initial_data(self):
        # One persisted inline (data=7) plus two extra forms seeded via the
        # formset-level `initial` argument.
        user = User.objects.create(username="bibi", serial=1)
        UserSite.objects.create(user=user, data=7)
        FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__")
        formset = FormSet(instance=user, initial=[{"data": 41}, {"data": 42}])
        # The bound form shows DB data; `initial` applies only to extras.
        self.assertEqual(formset.forms[0].initial["data"], 7)
        self.assertEqual(formset.extra_forms[0].initial["data"], 41)
        self.assertIn('value="42"', formset.extra_forms[1].as_p())
class FormsetTests(TestCase):
    def test_error_class(self):
        """
        Test the type of Formset and Form error attributes
        """
        Formset = modelformset_factory(User, fields="__all__")
        data = {
            "form-TOTAL_FORMS": "2",
            "form-INITIAL_FORMS": "0",
            "form-MAX_NUM_FORMS": "0",
            # Both forms submit the same username — presumably unique on the
            # User test model, so this produces validation errors to inspect.
            "form-0-id": "",
            "form-0-username": "apollo13",
            "form-0-serial": "1",
            "form-1-id": "",
            "form-1-username": "apollo13",
            "form-1-serial": "2",
        }
        formset = Formset(data)
        # check if the returned error classes are correct
        # note: formset.errors returns a list as documented
        self.assertIsInstance(formset.errors, list)
        self.assertIsInstance(formset.non_form_errors(), ErrorList)
        for form in formset.forms:
            self.assertIsInstance(form.errors, ErrorDict)
            self.assertIsInstance(form.non_field_errors(), ErrorList)

    def test_initial_data(self):
        # `initial` seeds only the extra forms; the existing row keeps its
        # database value.
        User.objects.create(username="bibi", serial=1)
        Formset = modelformset_factory(User, fields="__all__", extra=2)
        formset = Formset(initial=[{"username": "apollo11"}, {"username": "apollo12"}])
        self.assertEqual(formset.forms[0].initial["username"], "bibi")
        self.assertEqual(formset.extra_forms[0].initial["username"], "apollo11")
        self.assertIn('value="apollo12"', formset.extra_forms[1].as_p())

    def test_extraneous_query_is_not_run(self):
        Formset = modelformset_factory(Network, fields="__all__")
        data = {
            "test-TOTAL_FORMS": "1",
            "test-INITIAL_FORMS": "0",
            "test-MAX_NUM_FORMS": "",
            "test-0-name": "Random Place",
        }
        # Creating one new object should cost exactly one query; no extra
        # SELECT may be issued for the formset's queryset.
        with self.assertNumQueries(1):
            formset = Formset(data, prefix="test")
            formset.save()
class CustomWidget(forms.widgets.TextInput):
    """Marker widget used to verify that ``Meta.widgets`` overrides apply."""
    pass
class UserSiteForm(forms.ModelForm):
    """ModelForm with widget and localization overrides declared in Meta."""

    class Meta:
        model = UserSite
        fields = "__all__"
        # Override both the pk widget and a regular field's widget so tests
        # can check each is honoured by the formset factories.
        widgets = {
            "id": CustomWidget,
            "data": CustomWidget,
        }
        localized_fields = ("data",)
class Callback:
    """Recording formfield callback: remembers every invocation it receives."""

    def __init__(self):
        # One (db_field, kwargs) tuple per call, in call order.
        self.log = []

    def __call__(self, db_field, **kwargs):
        entry = (db_field, kwargs)
        self.log.append(entry)
        return db_field.formfield(**kwargs)
class FormfieldCallbackTests(TestCase):
    """
    Regression for #13095 and #17683: Using base forms with widgets
    defined in Meta should not raise errors and BaseModelForm should respect
    the specified pk widget.
    """

    def test_inlineformset_factory_default(self):
        Formset = inlineformset_factory(
            User, UserSite, form=UserSiteForm, fields="__all__"
        )
        form = Formset().forms[0]
        # Meta.widgets applies to both the pk field and the data field...
        self.assertIsInstance(form["id"].field.widget, CustomWidget)
        self.assertIsInstance(form["data"].field.widget, CustomWidget)
        # ...and Meta.localized_fields applies to "data" only.
        self.assertFalse(form.fields["id"].localize)
        self.assertTrue(form.fields["data"].localize)

    def test_modelformset_factory_default(self):
        # Same overrides must hold via modelformset_factory as well.
        Formset = modelformset_factory(UserSite, form=UserSiteForm)
        form = Formset().forms[0]
        self.assertIsInstance(form["id"].field.widget, CustomWidget)
        self.assertIsInstance(form["data"].field.widget, CustomWidget)
        self.assertFalse(form.fields["id"].localize)
        self.assertTrue(form.fields["data"].localize)

    def assertCallbackCalled(self, callback):
        # The callback must be invoked once per model field, in field order,
        # receiving the Meta-declared widget/localize kwargs for each field.
        id_field, user_field, data_field = UserSite._meta.fields
        expected_log = [
            (id_field, {"widget": CustomWidget}),
            (user_field, {}),
            (data_field, {"widget": CustomWidget, "localize": True}),
        ]
        self.assertEqual(callback.log, expected_log)

    def test_inlineformset_custom_callback(self):
        callback = Callback()
        inlineformset_factory(
            User,
            UserSite,
            form=UserSiteForm,
            formfield_callback=callback,
            fields="__all__",
        )
        self.assertCallbackCalled(callback)

    def test_modelformset_custom_callback(self):
        callback = Callback()
        modelformset_factory(UserSite, form=UserSiteForm, formfield_callback=callback)
        self.assertCallbackCalled(callback)
class BaseCustomDeleteFormSet(BaseFormSet):
    """
    A formset mix-in that lets a form decide if it's to be deleted.
    Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed.

    form.should_delete() is called. The formset delete field is also suppressed.
    """

    def add_fields(self, form, index):
        super().add_fields(form, index)
        self.can_delete = True
        # Remove the DELETE checkbox from the rendered form: deletion is
        # decided programmatically by form.should_delete() instead.
        if DELETION_FIELD_NAME in form.fields:
            del form.fields[DELETION_FIELD_NAME]

    def _should_delete_form(self, form):
        # Defer to the form itself; forms without should_delete() are kept.
        return hasattr(form, "should_delete") and form.should_delete()
class FormfieldShouldDeleteFormTests(TestCase):
    """
    BaseModelFormSet should use ModelFormSet method _should_delete_form.
    """

    class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet):
        """Model FormSet with CustomDelete MixIn"""

    class CustomDeleteUserForm(forms.ModelForm):
        """A model form with a 'should_delete' method"""

        class Meta:
            model = User
            fields = "__all__"

        def should_delete(self):
            """Delete form if odd serial."""
            return self.instance.serial % 2 != 0

    # Plain formset: honours the standard DELETE checkbox.
    NormalFormset = modelformset_factory(
        User, form=CustomDeleteUserForm, can_delete=True
    )
    # Custom formset: ignores DELETE, defers to form.should_delete().
    DeleteFormset = modelformset_factory(
        User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet
    )

    # Four fresh users; serials 1, 3 and 5 are odd, so three of the four
    # qualify for deletion under should_delete() above.
    data = {
        "form-TOTAL_FORMS": "4",
        "form-INITIAL_FORMS": "0",
        "form-MAX_NUM_FORMS": "4",
        "form-0-username": "John",
        "form-0-serial": "1",
        "form-1-username": "Paul",
        "form-1-serial": "2",
        "form-2-username": "George",
        "form-2-serial": "3",
        "form-3-username": "Ringo",
        "form-3-serial": "5",
    }
    # POST overlay flagging every form for standard deletion.
    delete_all_ids = {
        "form-0-DELETE": "1",
        "form-1-DELETE": "1",
        "form-2-DELETE": "1",
        "form-3-DELETE": "1",
    }

    def test_init_database(self):
        """Add test data to database via formset"""
        formset = self.NormalFormset(self.data)
        self.assertTrue(formset.is_valid())
        self.assertEqual(len(formset.save()), 4)

    def test_no_delete(self):
        """Verify base formset doesn't modify database"""
        # reload database
        self.test_init_database()
        # pass standard data dict & see none updated
        data = dict(self.data)
        data["form-INITIAL_FORMS"] = 4
        data.update(
            {
                "form-%d-id" % i: user.pk
                for i, user in enumerate(User.objects.order_by("pk"))
            }
        )
        formset = self.NormalFormset(data, queryset=User.objects.all())
        self.assertTrue(formset.is_valid())
        self.assertEqual(len(formset.save()), 0)
        self.assertEqual(len(User.objects.all()), 4)

    def test_all_delete(self):
        """Verify base formset honors DELETE field"""
        # reload database
        self.test_init_database()
        # create data dict with all fields marked for deletion
        data = dict(self.data)
        data["form-INITIAL_FORMS"] = 4
        data.update(
            {"form-%d-id" % i: user.pk for i, user in enumerate(User.objects.all())}
        )
        data.update(self.delete_all_ids)
        formset = self.NormalFormset(data, queryset=User.objects.all())
        self.assertTrue(formset.is_valid())
        self.assertEqual(len(formset.save()), 0)
        self.assertEqual(len(User.objects.all()), 0)

    def test_custom_delete(self):
        """Verify DeleteFormset ignores DELETE field and uses form method"""
        # reload database
        self.test_init_database()
        # Create formset with custom Delete function
        # create data dict with all fields marked for deletion
        data = dict(self.data)
        data["form-INITIAL_FORMS"] = 4
        data.update(
            {
                "form-%d-id" % i: user.pk
                for i, user in enumerate(User.objects.order_by("pk"))
            }
        )
        data.update(self.delete_all_ids)
        formset = self.DeleteFormset(data, queryset=User.objects.all())
        # Three with odd serial values were deleted.
        self.assertTrue(formset.is_valid())
        self.assertEqual(len(formset.save()), 0)
        self.assertEqual(User.objects.count(), 1)
        # No odd serial values left.
        odd_serials = [user.serial for user in User.objects.all() if user.serial % 2]
        self.assertEqual(len(odd_serials), 0)
class RedeleteTests(TestCase):
    def test_resubmit(self):
        # Deleting an inline and then re-POSTing the same payload (e.g. a
        # page refresh) must validate and save cleanly even though the
        # object no longer exists.
        u = User.objects.create(username="foo", serial=1)
        us = UserSite.objects.create(user=u, data=7)
        formset_cls = inlineformset_factory(User, UserSite, fields="__all__")
        data = {
            "serial": "1",
            "username": "foo",
            "usersite_set-TOTAL_FORMS": "1",
            "usersite_set-INITIAL_FORMS": "1",
            "usersite_set-MAX_NUM_FORMS": "1",
            "usersite_set-0-id": str(us.pk),
            "usersite_set-0-data": "7",
            "usersite_set-0-user": "foo",
            "usersite_set-0-DELETE": "1",
        }
        formset = formset_cls(data, instance=u)
        self.assertTrue(formset.is_valid())
        formset.save()
        self.assertEqual(UserSite.objects.count(), 0)
        formset = formset_cls(data, instance=u)
        # Even if the "us" object isn't in the DB any more, the form
        # validates.
        self.assertTrue(formset.is_valid())
        formset.save()
        self.assertEqual(UserSite.objects.count(), 0)

    def test_delete_already_deleted(self):
        # The inline row is deleted out-of-band after the formset is bound;
        # the DELETE submission must still validate and save.
        u = User.objects.create(username="foo", serial=1)
        us = UserSite.objects.create(user=u, data=7)
        formset_cls = inlineformset_factory(User, UserSite, fields="__all__")
        data = {
            "serial": "1",
            "username": "foo",
            "usersite_set-TOTAL_FORMS": "1",
            "usersite_set-INITIAL_FORMS": "1",
            "usersite_set-MAX_NUM_FORMS": "1",
            "usersite_set-0-id": str(us.pk),
            "usersite_set-0-data": "7",
            "usersite_set-0-user": "foo",
            "usersite_set-0-DELETE": "1",
        }
        formset = formset_cls(data, instance=u)
        us.delete()
        self.assertTrue(formset.is_valid())
        formset.save()
        self.assertEqual(UserSite.objects.count(), 0)
| {
"content_hash": "dedd6b40667e0883ad91f34cd007a80f",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 87,
"avg_line_length": 36.43344709897611,
"alnum_prop": 0.5802810304449648,
"repo_name": "rsalmaso/django",
"id": "0ccc2c04901f37071e0aab48e176824f87a33bea",
"size": "21350",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "tests/model_formsets_regress/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91986"
},
{
"name": "HTML",
"bytes": "238949"
},
{
"name": "JavaScript",
"bytes": "157441"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16194804"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
import os
import time
from atx.device.windows import Window, FrozenWindow, WindowsDevice
# def _input_left_mouse(self, x, y):
# left, top, right, bottom = self.position
# width, height = right - left, bottom - top
# if x < 0 or x > width or y < 0 or y > height:
# return
# win32gui.SetForegroundWindow(self._handle)
# pos = win32gui.GetCursorPos()
# win32api.SetCursorPos((left+x, top+y))
# win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
# win32api.Sleep(100) #ms
# win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
# win32api.Sleep(100) #ms
# win32api.SetCursorPos(pos)
# def drag(self):
# pass
# def _input_keyboard(self, text):
# pass
def test():
    """Smoke-test the Windows capture backends.

    Tries three capture paths in turn, printing (not raising) any failure so
    the remaining paths still run:
      1. FrozenWindow located by title (Task Manager, GBK-encoded title);
      2. Window located by executable path (calc.exe);
      3. WindowsDevice full-screen capture.

    Fix: use ``print(...)`` calls instead of the Python-2-only
    ``print str(e)`` statement; the call form behaves identically on
    Python 2 and also runs on Python 3.
    """
    try:
        name = u"Windows 任务管理器"
        win = FrozenWindow(name.encode("gbk"), exclude_border=True)
        win.set_foreground()
        time.sleep(0.1)
        win._screenshot('taskman-pil.png')
        time.sleep(0.5)
        win._screenshot_cv2('taskman-cv2.png')
    except Exception as e:
        # Best-effort smoke test: report and continue with the next backend.
        print(str(e))
    try:
        filepath = "C:\\Windows\\System32\\calc.exe"
        win = Window(exe_file=filepath)
        win.set_foreground()
        time.sleep(0.1)
        win._screenshot('calc-pil.png')
        time.sleep(0.5)
        win._screenshot_cv2('calc-cv2.png')
    except Exception as e:
        print(str(e))
    dev = WindowsDevice()
    dev.screenshot('screen.png')
if __name__ == '__main__':
test() | {
"content_hash": "73cf3b59e2f4dff5af9a978bfc274c11",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 67,
"avg_line_length": 26.789473684210527,
"alnum_prop": 0.5808775376555337,
"repo_name": "Andy-hpliu/AirtestX",
"id": "981b4c5c2a8d909660a46ab42da979388e50a8e1",
"size": "1563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_windows.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "230"
},
{
"name": "CSS",
"bytes": "34684"
},
{
"name": "Go",
"bytes": "13043"
},
{
"name": "HTML",
"bytes": "28019"
},
{
"name": "JavaScript",
"bytes": "300119"
},
{
"name": "Makefile",
"bytes": "348"
},
{
"name": "Protocol Buffer",
"bytes": "5495"
},
{
"name": "Python",
"bytes": "394333"
},
{
"name": "Shell",
"bytes": "4162"
}
],
"symlink_target": ""
} |
from authy import __version__
from authy.api.resources import Users
from authy.api.resources import Tokens
from authy.api.resources import Apps
from authy.api.resources import StatsResource
from authy.api.resources import Phones
class AuthyApiClient(object):
    """Thin facade bundling the Authy REST API resource endpoints."""

    def __init__(self, api_key, api_uri="https://api.authy.com"):
        """Create one wrapper per API area, all sharing the same credentials."""
        self.api_key = api_key
        self.api_uri = api_uri
        # Resource wrappers; each talks to a different part of the API.
        self.users = Users(api_uri, api_key)
        self.tokens = Tokens(api_uri, api_key)
        self.apps = Apps(api_uri, api_key)
        self.stats = StatsResource(api_uri, api_key)
        self.phones = Phones(api_uri, api_key)

    def version(self):
        """Return this client library's version string."""
        return __version__
| {
"content_hash": "c5c15088da4ea59342d4be94d1104f6c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 65,
"avg_line_length": 29.703703703703702,
"alnum_prop": 0.6396508728179551,
"repo_name": "FloorLamp/authy-python",
"id": "38a9cc6d275bf52ffa3e89fe7e9eb30f3607ab38",
"size": "802",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "authy/api/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "127"
},
{
"name": "Python",
"bytes": "15850"
}
],
"symlink_target": ""
} |
"""
A Styles is a collection of PropertySets that can be applied to a particular RTF element.
At present there are only two, Text and Paragraph but ListStyles will be added soon too.
"""
from PropertySets import *
class TextStyle:
    """A named bundle of text properties plus optional background shading."""

    def __init__(self, textProps, name=None, shading_props=None):
        self.name = name
        self.textProps = textProps
        self.SetShadingPropertySet(shading_props)

    def Copy(self):
        """Return a deep, independent duplicate of this style."""
        return deepcopy(self)

    def SetShadingPropertySet(self, value):
        """Attach *value* (or a fresh default ShadingPropertySet) and return self."""
        if value is not None:
            assert isinstance(value, ShadingPropertySet)
        self.ShadingPropertySet = value if value else ShadingPropertySet()
        return self
class ParagraphStyle:
    """A named paragraph style: a TextStyle plus paragraph, frame and
    shading property sets, with optional BasedOn/Next style links."""

    def __init__(self, name, text_style, paragraph_props=None, frame_props=None, shading_props=None):
        # A usable paragraph style needs concrete font settings; the text
        # property set keeps these optional (so it can serve as an
        # override), so they are enforced here instead.
        if not text_style.textProps.font:
            raise Exception('Paragraph Styles must have a Font specified.')
        if not text_style.textProps.size:
            raise Exception('Paragraph Styles must have a Font Size specified.')

        self.name = name
        self.SetTextStyle(text_style)
        self.SetParagraphPropertySet(paragraph_props)
        self.SetFramePropertySet(frame_props)
        self.SetShadingPropertySet(shading_props)
        self.SetBasedOn(None)
        self.SetNext(None)

    def Copy(self):
        """Return a deep, independent duplicate of this style."""
        return deepcopy(self)

    def SetTextStyle(self, value):
        """Attach the mandatory TextStyle and return self."""
        assert isinstance(value, TextStyle)
        self.TextStyle = value
        return self

    def SetParagraphPropertySet(self, value):
        """Use *value*, or a fresh default ParagraphPropertySet when falsy."""
        if value is not None:
            assert isinstance(value, ParagraphPropertySet)
        self.ParagraphPropertySet = value if value else ParagraphPropertySet()
        return self

    def SetFramePropertySet(self, value):
        """Use *value*, or a fresh default FramePropertySet when falsy."""
        if value is not None:
            assert isinstance(value, FramePropertySet)
        self.FramePropertySet = value if value else FramePropertySet()
        return self

    def SetShadingPropertySet(self, value):
        """Set the background shading for the paragraph."""
        if value is not None:
            assert isinstance(value, ShadingPropertySet)
        self.ShadingPropertySet = value if value else ShadingPropertySet()
        return self

    def SetBasedOn(self, value):
        """Set the Paragraph Style that this one is based on."""
        if value:
            assert isinstance(value, ParagraphStyle)
        self.BasedOn = value
        return self

    def SetNext(self, value):
        """Set the Paragraph Style that should follow this one."""
        if value:
            assert isinstance(value, ParagraphStyle)
        self.Next = value
        return self
| {
"content_hash": "fe1ee2d79c1f83ec9ea50dfaf69c87ba",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 101,
"avg_line_length": 35.93589743589744,
"alnum_prop": 0.677488405280057,
"repo_name": "shvechikov/python-rtfng",
"id": "df14fe6ce5a3d7aad5c4170cc348dff1c8870dda",
"size": "2803",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "rtfng/Styles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "137316"
},
{
"name": "Shell",
"bytes": "4891"
}
],
"symlink_target": ""
} |
from django import forms
from django.forms import TextInput
from th_wallabag.models import Wallabag
class WallabagForm(forms.ModelForm):
    """
    Base ModelForm for the Wallabag service: exposes only the ``tag`` field.
    """

    class Meta:
        model = Wallabag
        # Only the tag is user-editable through this form.
        fields = ('tag',)
        widgets = {
            'tag': TextInput(attrs={'class': 'form-control'}),
        }
class WallabagProviderForm(WallabagForm):
    """Provider-role variant; adds no behaviour beyond WallabagForm."""
    pass
class WallabagConsumerForm(WallabagForm):
    """Consumer-role variant; adds no behaviour beyond WallabagForm."""
    pass
| {
"content_hash": "de02e4c3d975649884b3b407f22c03e5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 62,
"avg_line_length": 18.48,
"alnum_prop": 0.6385281385281385,
"repo_name": "foxmask/django-th",
"id": "32b39f70ef9ec2247b28e374896c4c515203b54d",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "th_wallabag/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1654"
},
{
"name": "Dockerfile",
"bytes": "357"
},
{
"name": "HTML",
"bytes": "188416"
},
{
"name": "JavaScript",
"bytes": "796"
},
{
"name": "Python",
"bytes": "397000"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from google.api_core.protobuf_helpers import get_messages
from google.identity.accesscontextmanager.v1 import (
access_level_pb2,
access_policy_pb2,
service_perimeter_pb2,
)
# Generated pb2 modules whose message classes this package re-exports.
_modules = [access_level_pb2, access_policy_pb2, service_perimeter_pb2]

# Names of all protobuf message classes found in the wrapped modules.
names = []

for module in _modules:
    for name, message in get_messages(module).items():
        # Stamp the owning module's name onto each message class.
        message.__module__ = module.__name__
        # Bug fix: `names` was declared but never populated; record each
        # message name so the list actually reflects the exported surface.
        names.append(name)
| {
"content_hash": "f167a3f7b6cbf73f778bb368cabdc2c4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 25.705882352941178,
"alnum_prop": 0.7139588100686499,
"repo_name": "googleapis/python-access-context-manager",
"id": "90c8729b3e6ca26c3012bdf574880894c37b2afd",
"size": "1040",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/identity/accesscontextmanager/v1/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "52300"
},
{
"name": "Shell",
"bytes": "30708"
}
],
"symlink_target": ""
} |
import paddle.fluid as fluid
from utils import gen_data
from nets import mlp
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import (
fleet,
)
from paddle.fluid.incubate.fleet.base import role_maker
# Network inputs: 32-dim float features and an integer class label.
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
# Cast the label to float32 and then back to int64 inside a GPU device
# guard — presumably to exercise cast-op device placement; confirm intent
# against the surrounding test suite before simplifying.
input_y = fluid.layers.cast(input_y, dtype="float32")
with fluid.device_guard("gpu"):
    input_y = fluid.layers.cast(input_y, dtype="int64")
cost = mlp(input_x, input_y)
optimizer = fluid.optimizer.Adagrad(learning_rate=0.01)
# Initialize the distributed fleet from the cloud-provided role (server or
# worker) and wrap the optimizer for parameter-server training.
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
optimizer = fleet.distributed_optimizer(optimizer)
optimizer.minimize(cost)
if fleet.is_server():
    # Parameter server: serve parameters until shut down.
    fleet.init_server()
    fleet.run_server()
elif fleet.is_worker():
    # Worker: run the training loop on CPU and fetch the cost each step.
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fleet.startup_program)
    step = 1001
    for i in range(step):
        cost_val = exe.run(
            program=fleet.main_program, feed=gen_data(), fetch_list=[cost.name]
        )
        print(
            "worker_index: %d, step%d cost = %f"
            % (fleet.worker_index(), i, cost_val[0])
        )
| {
"content_hash": "ecd8d002f41edd5a7e46d1dc66ea04c4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 80,
"avg_line_length": 30.125,
"alnum_prop": 0.6705394190871369,
"repo_name": "luotao1/Paddle",
"id": "d14605a6179f7a956bf0220146563d0faa8e888f",
"size": "1816",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/fleet_ps_training.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
import json
import logging
import numpy as np
import itertools as it
from utils import parse_flag_file
from analyze import (
remove_extra_loci, get_resolution,
get_summary_data, get_haplotype_matrix,
get_resolution_matrix)
class Haplotype:
    """Assemble haplotype/pattern/amplicon outputs for a minimum spanning set.

    Combines the selected patterns and amplicons from a minimum spanning set
    with per-position primer-zone flags, and writes the various result
    tables and JSON files.

    Fixes applied in this revision:
    * Python-2-only ``dict.iteritems()`` replaced with ``items()`` — the
      iteration behaviour is identical and the code now also runs on
      Python 3.
    * ``write_suggested_amplicons`` previously called ``out.write()`` with
      no argument, which unconditionally raised ``TypeError``; it now fails
      explicitly (see method).
    """

    def __init__(self, patterns, minimum_spanning_set,
                 flag_file_path, primer_zone_size, variant_matrix, sep):
        self._logger = logging.getLogger(__name__)
        self._variant_matrix = variant_matrix
        self._sep = sep
        self._minimum_spanning_set = minimum_spanning_set
        self._selected_patterns = \
            self._minimum_spanning_set.get_selected_patterns()
        self._selected_amplicons = \
            self._minimum_spanning_set.get_selected_amplicons()
        self._patterns = patterns
        self._pattern_dic = patterns.get_pattern_dic(
            self._selected_patterns)
        self._pattern_df = patterns.get_pattern_df(
            self._selected_patterns)
        # Selected amps is the group of amplicons for a pattern that
        # were not removed due to overlap with other amplicons
        # in the minimum spanning set.
        self._pattern_dic = self._get_selected_amplicons()
        self._get_flags(
            flag_file_path, int(primer_zone_size))

    def _get_flags(self, flag_file_path, primer_zone_size):
        """Annotate every selected amplicon with primer-zone flag statistics.

        For each amplicon, the ``primer_zone_size`` positions immediately
        up- and downstream are examined against the per-position flags from
        the flag file. The stored summary holds the raw flag strings, the
        percentage of unflagged (flag == 0) positions, and the median length
        of runs of consecutive unflagged positions.
        """
        flag_df = parse_flag_file(flag_file_path)
        for pattern, amplicons in self._pattern_dic.items():
            for amplicon, chars in amplicons.items():
                genome = chars['g']['name']
                genome_size = int(chars['g']['length'])
                start = int(amplicon)
                stop = int(chars['s'])
                # Clamp the upstream window at the start of the genome.
                up_start = start - primer_zone_size - 1 if start - primer_zone_size > 1 else 0
                up_stop = start - 1
                down_start = stop
                # Clamp the downstream window at the end of the genome.
                down_stop = (stop + primer_zone_size if
                             stop + primer_zone_size < genome_size
                             else genome_size - 1)
                upstream_flags = np.array(
                    flag_df[flag_df.Genome == genome].iloc[up_start: up_stop].Flag, dtype=int)
                downstream_flags = np.array(
                    flag_df[flag_df.Genome == genome].iloc[down_start: down_stop].Flag, dtype=int)
                # Length of each run of consecutive unflagged positions;
                # runs of flagged positions contribute 0.
                upstream_count = np.array([sum([not value for value in run[1]]) for
                                           run in it.groupby(np.array(upstream_flags, dtype=bool))],
                                          dtype=int)
                downstream_count = np.array(
                    [sum([not value for value in run[1]]) for
                     run in it.groupby(np.array(downstream_flags, dtype=bool))],
                    dtype=int)
                # Share of primer-zone positions that are unflagged, in %.
                percent_ok = (
                    (np.sum(upstream_count) + np.sum(downstream_count))/float(
                        len(upstream_flags) + len(downstream_flags)) * 100)
                med_size = np.median(np.append(upstream_count, downstream_count))
                self._pattern_dic[pattern][amplicon]['primer_zone'] = {
                    'upstream': ",".join(np.array(upstream_flags, dtype=str)),
                    'downstream': ",".join(np.array(downstream_flags, dtype=str)),
                    'percent_ok': percent_ok,
                    'med_size': med_size
                }

    def _get_selected_amplicons(self):
        """Return pattern_dic restricted to the amplicons kept in the set."""
        new_dic = {}
        for pattern, sel_amplicons in zip(
                self._selected_patterns, self._selected_amplicons):
            all_amplicons = self._pattern_dic[pattern]
            new_dic[pattern] = {
                k: v for k, v in all_amplicons.items() if k in sel_amplicons}
        return new_dic

    def write_haplotype(self, file_name):
        """Write the selected-pattern DataFrame to ``file_name`` as CSV."""
        self._logger.info("Writing haplotype to %s", file_name)
        self._pattern_df.to_csv(file_name)

    def write_json(self, file_name):
        """Dump the annotated pattern/amplicon dictionary as JSON."""
        self._logger.info(
            "Writing minimum spanning set amplicons to %s",
            file_name)
        with open(file_name, 'w') as out:
            out.write(json.dumps(self._pattern_dic))

    def write_haplotype_matrix(self, file_name):
        # NOTE(review): stub — only logs; the actual haplotype matrix is
        # produced by write_output(). Kept for interface compatibility.
        self._logger.info("Writing haplotype matrix to %s", file_name)

    def write_suggested_amplicons(self, file_name):
        self._logger.info(
            "Writing suggested amplicons to %s", file_name
        )
        # Bug fix: the original opened the file and called out.write() with
        # no argument, which always raised TypeError. No payload format has
        # been defined for this report, so fail explicitly instead of
        # crashing obscurely (or silently truncating the target file).
        raise NotImplementedError(
            "write_suggested_amplicons has no output format defined")

    def write_output(self, haplotype_output, pattern_output, amplicon_output):
        """Write the haplotype, pattern-resolution and amplicon summary CSVs."""
        best_loci = remove_extra_loci(self._pattern_dic)
        pattern_order = self._pattern_df.columns
        haplotype_matrix = get_haplotype_matrix(
            pattern_order, best_loci, self._variant_matrix, self._sep)
        self._logger.info("Writing haplotype matrix to %s", haplotype_output)
        haplotype_matrix.to_csv(haplotype_output)
        self._logger.info("Writing pattern matrix to %s", pattern_output)
        scores, patterns = get_resolution(self._pattern_df)
        pattern_matrix = get_resolution_matrix(
            self._pattern_df.index, pattern_order, patterns)
        pattern_matrix.to_csv(pattern_output)
        self._logger.info("Writing amplicon matrix to %s", amplicon_output)
        amplicon_matrix = get_summary_data(best_loci, scores, pattern_order)
        amplicon_matrix.to_csv(amplicon_output, index=None)

    def write_summary(self, file_name):
        """Write a human-readable summary of set size and resolution groups."""
        self._logger.info(
            "Writing summary to %s", file_name
        )
        with open(file_name, 'w') as out:
            out.write(
                "Minimum set size: {}\n".format(
                    len(self._selected_patterns)))
            out.write(
                "Resolution Index: {:0.2f}%\n".format(
                    self._minimum_spanning_set.get_resolution_index())
            )
            group_size, counts = \
                self._minimum_spanning_set.get_resolution_groups()
            out.write("Group Size Breakdown:\n")
            labels = [
                "Group(s) of size {}".format(i)
                if i > 1 else "Strain(s) Fully Resolved"
                for i in group_size]
            for label, count in zip(labels, counts):
                out.write("{0} {1}\n".format(count, label))
| {
"content_hash": "fb43ec28df916dda78fc82f033fd76ac",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 98,
"avg_line_length": 43.3125,
"alnum_prop": 0.5682219015552349,
"repo_name": "FofanovLab/VaST",
"id": "ce3e9b2ffad2f2abacd1f93f7e33ae7ee0566e01",
"size": "6237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VaST/Haplotype.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100395"
}
],
"symlink_target": ""
} |
''' Some tests for the documenting decorator and support functions '''
import numpy as np
from numpy.testing import assert_equal, assert_raises
from nose.tools import assert_true
from scipy.misc import doccer
docstring = \
"""Docstring
%(strtest1)s
%(strtest2)s
%(strtest3)s
"""
param_doc1 = \
"""Another test
with some indent"""
param_doc2 = \
"""Another test, one line"""
param_doc3 = \
""" Another test
with some indent"""
doc_dict = {'strtest1':param_doc1,
'strtest2':param_doc2,
'strtest3':param_doc3}
filled_docstring = \
"""Docstring
Another test
with some indent
Another test, one line
Another test
with some indent
"""
def test_unindent():
    # unindent_string strips the common leading indent: already-flush
    # strings pass through unchanged, and param_doc3 reduces to param_doc1.
    yield assert_equal, doccer.unindent_string(param_doc1), param_doc1
    yield assert_equal, doccer.unindent_string(param_doc2), param_doc2
    yield assert_equal, doccer.unindent_string(param_doc3), param_doc1
def test_unindent_dict():
    # unindent_dict applies unindent_string to every value of the dict.
    d2 = doccer.unindent_dict(doc_dict)
    yield assert_equal, d2['strtest1'], doc_dict['strtest1']
    yield assert_equal, d2['strtest2'], doc_dict['strtest2']
    yield assert_equal, d2['strtest3'], doc_dict['strtest1']
def test_docformat():
    # Full template substitution must reproduce the expected fixture.
    udd = doccer.unindent_dict(doc_dict)
    formatted = doccer.docformat(docstring, udd)
    yield assert_equal, formatted, filled_docstring
    single_doc = 'Single line doc %(strtest1)s'
    formatted = doccer.docformat(single_doc, doc_dict)
    # Note - initial indent of format string does not
    # affect subsequent indent of inserted parameter
    yield assert_equal, formatted, """Single line doc Another test
   with some indent"""
def test_decorator():
    # filldoc's second argument toggles parameter unindentation before
    # substitution; both settings are checked against expected output.
    # with unindentation of parameters
    decorator = doccer.filldoc(doc_dict, True)

    @decorator
    def func():
        """ Docstring
        %(strtest3)s
        """
    yield assert_equal, func.__doc__, """ Docstring
        Another test
           with some indent
        """

    # without unindentation of parameters
    decorator = doccer.filldoc(doc_dict, False)

    @decorator
    def func():
        """ Docstring
        %(strtest3)s
        """
    yield assert_equal, func.__doc__, """ Docstring
            Another test
           with some indent
        """
| {
"content_hash": "a407964c03231f34ecf0cb29d893d578",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 70,
"avg_line_length": 25.629213483146067,
"alnum_prop": 0.64138535729943,
"repo_name": "teoliphant/scipy",
"id": "6204a9b60306dcbb749b204d9a4f813409e1cd1d",
"size": "2281",
"binary": false,
"copies": "60",
"ref": "refs/heads/master",
"path": "scipy/misc/tests/test_doccer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11530901"
},
{
"name": "C++",
"bytes": "7695320"
},
{
"name": "FORTRAN",
"bytes": "5898903"
},
{
"name": "Matlab",
"bytes": "1861"
},
{
"name": "Objective-C",
"bytes": "137083"
},
{
"name": "Python",
"bytes": "5863600"
},
{
"name": "Shell",
"bytes": "1793"
}
],
"symlink_target": ""
} |
"""Base class for RPC testing."""
import configparser
from enum import Enum
import argparse
import logging
import os
import pdb
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .p2p import NetworkThread
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
get_datadir_path,
initialize_datadir,
p2p_port,
wait_until_helper,
)
class TestStatus(Enum):
    """Overall outcome of a functional test run."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3
# Process exit codes reported back to the test runner.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77  # conventional "skipped" exit code (automake-style)
# Prefix used for each run's temporary data directory.
TMPDIR_PREFIX = "bitsend_func_test_"
class SkipTest(Exception):
    """This exception is raised to skip a test"""
    def __init__(self, message):
        # Human-readable reason; surfaced by main() when the test is skipped.
        self.message = message
class BitSendTestMetaClass(type):
    """Metaclass for BitSendTestFramework.

    Ensures that any attempt to register a subclass of `BitSendTestFramework`
    adheres to a standard whereby the subclass overrides `set_test_params` and
    `run_test` but DOES NOT override either `__init__` or `main`. If any of
    those standards are violated, a ``TypeError`` is raised."""
    def __new__(cls, clsname, bases, dct):
        # The base framework class itself is exempt from the checks below.
        if clsname != 'BitSendTestFramework':
            has_required = 'run_test' in dct and 'set_test_params' in dct
            if not has_required:
                raise TypeError("BitSendTestFramework subclasses must override "
                                "'run_test' and 'set_test_params'")
            has_forbidden = '__init__' in dct or 'main' in dct
            if has_forbidden:
                raise TypeError("BitSendTestFramework subclasses may not override "
                                "'__init__' or 'main'")
        return super().__new__(cls, clsname, bases, dct)
class BitSendTestFramework(metaclass=BitSendTestMetaClass):
"""Base class for a bitsend test script.
Individual bitsend test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
chain = None # type: str
setup_clean_chain = None # type: bool
    def __init__(self):
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        self.chain = 'regtest'
        self.setup_clean_chain = False
        self.nodes = []
        self.network_thread = None
        self.rpc_timeout = 60  # Wait for up to 60 seconds for the RPC server to respond
        self.supports_cli = True
        self.bind_to_localhost_only = True
        # Options must be parsed before set_test_params(), which may read self.options.
        self.parse_args()
        self.default_wallet_name = "default_wallet" if self.options.descriptors else ""
        self.wallet_data_filename = "wallet.dat"
        # Optional list of wallet names that can be set in set_test_params to
        # create and import keys to. If unset, default is len(nodes) *
        # [default_wallet_name]. If wallet names are None, wallet creation is
        # skipped. If list is truncated, wallet creation is skipped and keys
        # are not imported.
        self.wallet_names = None
        self.set_test_params()
        assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes
        # timeout_factor == 0 means "disable timeouts": use a huge multiplier.
        if self.options.timeout_factor == 0:
            self.options.timeout_factor = 99999
        self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor)  # optionally, increase timeout by a factor
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""
        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
        # Translate every failure mode into a TestStatus; shutdown() always runs.
        try:
            self.setup()
            self.run_test()
        except JSONRPCException:
            self.log.exception("JSONRPC error")
            self.success = TestStatus.FAILED
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            self.success = TestStatus.SKIPPED
        except AssertionError:
            self.log.exception("Assertion failed")
            self.success = TestStatus.FAILED
        except KeyError:
            self.log.exception("Key error")
            self.success = TestStatus.FAILED
        except subprocess.CalledProcessError as e:
            self.log.exception("Called Process failed with '{}'".format(e.output))
            self.success = TestStatus.FAILED
        except Exception:
            self.log.exception("Unexpected exception caught during testing")
            self.success = TestStatus.FAILED
        # KeyboardInterrupt derives from BaseException, not Exception, so this
        # clause is still reachable after the broad `except Exception` above.
        except KeyboardInterrupt:
            self.log.warning("Exiting after keyboard interrupt")
            self.success = TestStatus.FAILED
        finally:
            exit_code = self.shutdown()
            sys.exit(exit_code)
    def parse_args(self):
        """Define and parse the framework's command-line options into self.options."""
        previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
        parser = argparse.ArgumentParser(usage="%(prog)s [options]")
        parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
                            help="Leave bitsendds and test.* datadir on exit or error")
        parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
                            help="Don't stop bitsendds after the test execution")
        parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                            help="Directory for caching pregenerated datadirs (default: %(default)s)")
        parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
                            help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                            help="Print out all RPC calls as they are made")
        parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
                            help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_argument("--previous-releases", dest="prev_releases", action="store_true",
                            default=os.path.isdir(previous_releases_path) and bool(os.listdir(previous_releases_path)),
                            help="Force test of previous releases (default: %(default)s)")
        parser.add_argument("--coveragedir", dest="coveragedir",
                            help="Write tested RPC commands into this directory")
        parser.add_argument("--configfile", dest="configfile",
                            default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
                            help="Location of the test framework config file (default: %(default)s)")
        parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                            help="Attach a python debugger if test fails")
        parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
                            help="use bitsend-cli instead of RPC for all commands")
        parser.add_argument("--perf", dest="perf", default=False, action="store_true",
                            help="profile running nodes with perf for the duration of the test")
        parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
                            help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required")
        parser.add_argument("--randomseed", type=int,
                            help="set a random seed for deterministically reproducing a previous test run")
        parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')
        # Both flags write the single self.options.descriptors boolean.
        group = parser.add_mutually_exclusive_group()
        group.add_argument("--descriptors", default=False, action="store_true",
                           help="Run test using a descriptor wallet", dest='descriptors')
        group.add_argument("--legacy-wallet", default=False, action="store_false",
                           help="Run test using legacy wallets", dest='descriptors')
        # Give the concrete test a chance to register its own options.
        self.add_options(parser)
        self.options = parser.parse_args()
        self.options.previous_releases_path = previous_releases_path
def setup(self):
"""Call this method to start up the test framework object with options set."""
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
fname_bitsendd = os.path.join(
config["environment"]["BUILDDIR"],
"src",
"bitsendd" + config["environment"]["EXEEXT"],
)
fname_bitsendcli = os.path.join(
config["environment"]["BUILDDIR"],
"src",
"bitsend-cli" + config["environment"]["EXEEXT"],
)
self.options.bitsendd = os.getenv("BITSENDD", default=fname_bitsendd)
self.options.bitsendcli = os.getenv("BITSENDCLI", default=fname_bitsendcli)
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
# Seed the PRNG. Note that test runs are reproducible if and only if
# a single thread accesses the PRNG. For more information, see
# https://docs.python.org/3/library/random.html#notes-on-reproducibility.
# The network thread shouldn't access random. If we need to change the
# network thread to access randomness, it should instantiate its own
# random.Random object.
seed = self.options.randomseed
if seed is None:
seed = random.randrange(sys.maxsize)
else:
self.log.debug("User supplied random seed {}".format(seed))
random.seed(seed)
self.log.debug("PRNG seed is: {}".format(seed))
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.success = TestStatus.PASSED
    def shutdown(self):
        """Call this method to shut down the test framework object."""
        # Drop into the debugger while nodes are still up, if requested.
        if self.success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()
        self.log.debug('Closing down network thread')
        self.network_thread.close()
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: bitsendds were not stopped and may still be running")
        # Keep the datadirs when asked to, on failure, or when perf data was collected.
        should_clean_up = (
            not self.options.nocleanup and
            not self.options.noshutdown and
            self.success != TestStatus.FAILED and
            not self.options.perf
        )
        if should_clean_up:
            self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
            cleanup_tree_on_exit = True
        elif self.options.perf:
            self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        else:
            self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        if self.success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif self.success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("")
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            self.log.error("")
            self.log.error("If this failure happened unexpectedly or intermittently, please file a bug and provide a link or upload of the combined log.")
            self.log.error(self.config['environment']['PACKAGE_BUGREPORT'])
            self.log.error("")
            exit_code = TEST_EXIT_FAILED
        # Logging.shutdown will not remove stream- and filehandlers, so we must
        # do it explicitly. Handlers are removed so the next test run can apply
        # different log handler settings.
        # See: https://docs.python.org/3/library/logging.html#logging.shutdown
        for h in list(self.log.handlers):
            h.flush()
            h.close()
            self.log.removeHandler(h)
        rpc_logger = logging.getLogger("BitSendRPC")
        for h in list(rpc_logger.handlers):
            h.flush()
            rpc_logger.removeHandler(h)
        if cleanup_tree_on_exit:
            shutil.rmtree(self.options.tmpdir)
        self.nodes.clear()
        return exit_code
    # Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        raise NotImplementedError
    def add_options(self, parser):
        """Override this method to add command-line options to the test"""
        pass
    def skip_test_if_missing_module(self):
        """Override this method to skip a test if a module is not compiled"""
        pass
    def setup_chain(self):
        """Override this method to customize blockchain setup"""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        # Clean chain: empty datadirs; otherwise copy the 199-block cache.
        if self.setup_clean_chain:
            self._initialize_chain_clean()
        else:
            self._initialize_chain()
    def setup_network(self):
        """Override this method to customize test network topology"""
        self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        #
        # Topology looks like this:
        # node0 <-- node1 <-- node2 <-- node3
        #
        # If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
        # ensure block propagation, all nodes will establish outgoing connections toward node0.
        # See fPreferredDownload in net_processing.
        #
        # If further outbound connections are needed, they can be added at the beginning of the test with e.g.
        # self.connect_nodes(1, 2)
        for i in range(self.num_nodes - 1):
            self.connect_nodes(i + 1, i)
        self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = [[]] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
if self.is_wallet_compiled():
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for i in range(self.num_nodes):
self.init_wallet(i)
def init_wallet(self, i):
wallet_name = self.default_wallet_name if self.wallet_names is None else self.wallet_names[i] if i < len(self.wallet_names) else False
if wallet_name is not False:
n = self.nodes[i]
if wallet_name is not None:
n.createwallet(wallet_name=wallet_name, descriptors=self.options.descriptors, load_on_startup=True)
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
    def run_test(self):
        """Tests must override this method to define test logic"""
        # Abstract; overriding is enforced for subclasses by the metaclass.
        raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
def get_bin_from_version(version, bin_name, bin_default):
if not version:
return bin_default
return os.path.join(
self.options.previous_releases_path,
re.sub(
r'\.0$',
'', # remove trailing .0 for point releases
'v{}.{}.{}.{}'.format(
(version % 100000000) // 1000000,
(version % 1000000) // 10000,
(version % 10000) // 100,
(version % 100) // 1,
),
),
'bin',
bin_name,
)
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if versions is None:
versions = [None] * num_nodes
if binary is None:
binary = [get_bin_from_version(v, 'bitsendd', self.options.bitsendd) for v in versions]
if binary_cli is None:
binary_cli = [get_bin_from_version(v, 'bitsend-cli', self.options.bitsendcli) for v in versions]
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(versions), num_nodes)
assert_equal(len(binary), num_nodes)
assert_equal(len(binary_cli), num_nodes)
for i in range(num_nodes):
test_node_i = TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
chain=self.chain,
rpchost=rpchost,
timewait=self.rpc_timeout,
timeout_factor=self.options.timeout_factor,
bitsendd=binary[i],
bitsend_cli=binary_cli[i],
version=versions[i],
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
use_valgrind=self.options.valgrind,
descriptors=self.options.descriptors,
)
self.nodes.append(test_node_i)
if not test_node_i.version_is_at_least(170000):
# adjust conf for pre 17
conf_file = test_node_i.bitsendconf
with open(conf_file, 'r', encoding='utf8') as conf:
conf_data = conf.read()
with open(conf_file, 'w', encoding='utf8') as conf:
conf.write(conf_data.replace('[regtest]', ''))
    def start_node(self, i, *args, **kwargs):
        """Start a bitsendd"""
        node = self.nodes[i]
        node.start(*args, **kwargs)
        node.wait_for_rpc_connection()
        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def start_nodes(self, extra_args=None, *args, **kwargs):
        """Start multiple bitsendds"""
        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        # Start all processes first, then wait, so startup happens in parallel.
        try:
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i], *args, **kwargs)
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            # (bare except is re-raised, so no error is swallowed here)
            self.stop_nodes()
            raise
        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def stop_node(self, i, expected_stderr='', wait=0):
        """Stop a bitsendd test node"""
        self.nodes[i].stop_node(expected_stderr, wait=wait)
        self.nodes[i].wait_until_stopped()
    def stop_nodes(self, wait=0):
        """Stop multiple bitsendd test nodes"""
        for node in self.nodes:
            # Issue RPC to stop nodes
            node.stop_node(wait=wait)
        for node in self.nodes:
            # Wait for nodes to stop
            node.wait_until_stopped()
    def restart_node(self, i, extra_args=None):
        """Stop and start a test node"""
        self.stop_node(i)
        self.start_node(i, extra_args)
    def wait_for_node_exit(self, i, timeout):
        """Block until node i's process exits (raises on timeout)."""
        self.nodes[i].process.wait(timeout)
    def connect_nodes(self, a, b):
        """Make node a establish an outbound P2P connection to node b."""
        def connect_nodes_helper(from_connection, node_num):
            ip_port = "127.0.0.1:" + str(p2p_port(node_num))
            from_connection.addnode(ip_port, "onetry")
            # poll until version handshake complete to avoid race conditions
            # with transaction relaying
            # See comments in net_processing:
            # * Must have a version message before anything else
            # * Must have a verack message before anything else
            wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
            wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))
        connect_nodes_helper(self.nodes[a], b)
    def disconnect_nodes(self, a, b):
        """Disconnect every P2P connection between node a and node b."""
        def disconnect_nodes_helper(from_connection, node_num):
            def get_peer_ids():
                # Peers are matched by the "testnode{n}" tag in their subver.
                result = []
                for peer in from_connection.getpeerinfo():
                    if "testnode{}".format(node_num) in peer['subver']:
                        result.append(peer['id'])
                return result
            peer_ids = get_peer_ids()
            if not peer_ids:
                self.log.warning("disconnect_nodes: {} and {} were not connected".format(
                    from_connection.index,
                    node_num,
                ))
                return
            for peer_id in peer_ids:
                try:
                    from_connection.disconnectnode(nodeid=peer_id)
                except JSONRPCException as e:
                    # If this node is disconnected between calculating the peer id
                    # and issuing the disconnect, don't worry about it.
                    # This avoids a race condition if we're mass-disconnecting peers.
                    if e.error['code'] != -29:  # RPC_CLIENT_NODE_NOT_CONNECTED
                        raise
            # wait to disconnect
            wait_until_helper(lambda: not get_peer_ids(), timeout=5)
        disconnect_nodes_helper(self.nodes[a], b)
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        # Assumes the default chain topology node0 <- node1 <- node2 <- node3.
        self.disconnect_nodes(1, 2)
        self.sync_all(self.nodes[:2])
        self.sync_all(self.nodes[2:])
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        self.connect_nodes(1, 2)
        self.sync_all()
    def sync_blocks(self, nodes=None, wait=1, timeout=60):
        """
        Wait until everybody has the same tip.

        sync_blocks needs to be called with an rpc_connections set that has least
        one node already synced to the latest, stable tip, otherwise there's a
        chance it might return before all nodes are stably synced.
        """
        rpc_connections = nodes or self.nodes
        timeout = int(timeout * self.options.timeout_factor)
        stop_time = time.time() + timeout
        while time.time() <= stop_time:
            best_hash = [x.getbestblockhash() for x in rpc_connections]
            if best_hash.count(best_hash[0]) == len(rpc_connections):
                return
            # Check that each peer has at least one connection
            assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
            time.sleep(wait)
        raise AssertionError("Block sync timed out after {}s:{}".format(
            timeout,
            "".join("\n  {!r}".format(b) for b in best_hash),
        ))
    def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
        """
        Wait until everybody has the same transactions in their memory
        pools
        """
        rpc_connections = nodes or self.nodes
        timeout = int(timeout * self.options.timeout_factor)
        stop_time = time.time() + timeout
        while time.time() <= stop_time:
            pool = [set(r.getrawmempool()) for r in rpc_connections]
            if pool.count(pool[0]) == len(rpc_connections):
                if flush_scheduler:
                    for r in rpc_connections:
                        r.syncwithvalidationinterfacequeue()
                return
            # Check that each peer has at least one connection
            assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
            time.sleep(wait)
        raise AssertionError("Mempool sync timed out after {}s:{}".format(
            timeout,
            "".join("\n  {!r}".format(m) for m in pool),
        ))
    def sync_all(self, nodes=None):
        """Sync blocks first, then mempools, across the given nodes."""
        self.sync_blocks(nodes)
        self.sync_mempools(nodes)
    def wait_until(self, test_function, timeout=60):
        """Poll test_function until true, scaling the timeout by --timeout-factor."""
        return wait_until_helper(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor)
# Private helper methods. These should not be accessed by the subclass test scripts.
    def _start_logging(self):
        """Attach file (DEBUG) and console (--loglevel) handlers to the test logger."""
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitsendd's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)
        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitSendRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.

        Create a cache of a 199-block-long chain
        Afterward, create num_nodes copies from the cache."""
        CACHE_NODE_ID = 0  # Use node 0 to create the cache for all other nodes
        cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
        assert self.num_nodes <= MAX_NODES
        if not os.path.isdir(cache_node_dir):
            self.log.debug("Creating cache directory {}".format(cache_node_dir))
            initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain)
            self.nodes.append(
                TestNode(
                    CACHE_NODE_ID,
                    cache_node_dir,
                    chain=self.chain,
                    extra_conf=["bind=127.0.0.1"],
                    extra_args=['-disablewallet'],
                    rpchost=None,
                    timewait=self.rpc_timeout,
                    timeout_factor=self.options.timeout_factor,
                    bitsendd=self.options.bitsendd,
                    bitsend_cli=self.options.bitsendcli,
                    coverage_dir=None,
                    cwd=self.options.tmpdir,
                    descriptors=self.options.descriptors,
                ))
            self.start_node(CACHE_NODE_ID)
            cache_node = self.nodes[CACHE_NODE_ID]
            # Wait for RPC connections to be ready
            cache_node.wait_for_rpc_connection()
            # Set a time in the past, so that blocks don't end up in the future
            cache_node.setmocktime(cache_node.getblockheader(cache_node.getbestblockhash())['time'])
            # Create a 199-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # The 4th node gets only 24 immature blocks so that the very last
            # block in the cache does not age too much (have an old tip age).
            # This is needed so that we are out of IBD when the test starts,
            # see the tip age check in IsInitialBlockDownload().
            for i in range(8):
                cache_node.generatetoaddress(
                    nblocks=25 if i != 7 else 24,
                    address=TestNode.PRIV_KEYS[i % 4].address,
                )
            assert_equal(cache_node.getblockchaininfo()["blocks"], 199)
            # Shut it down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            def cache_path(*paths):
                # Path inside the cached node's chain-specific data directory.
                return os.path.join(cache_node_dir, self.chain, *paths)
            os.rmdir(cache_path('wallets'))  # Remove empty wallets dir
            for entry in os.listdir(cache_path()):
                if entry not in ['chainstate', 'blocks']:  # Only keep chainstate and blocks folder
                    os.remove(cache_path(entry))
        for i in range(self.num_nodes):
            self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(cache_node_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i, self.chain)  # Overwrite port/rpcport in bitsend.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i, self.chain)
    def skip_if_no_py3_zmq(self):
        """Attempt to import the zmq package and skip the test if the import fails."""
        try:
            import zmq  # noqa
        except ImportError:
            raise SkipTest("python3-zmq module not available.")
    def skip_if_no_bitsendd_zmq(self):
        """Skip the running test if bitsendd has not been compiled with zmq support."""
        if not self.is_zmq_compiled():
            raise SkipTest("bitsendd has not been built with zmq enabled.")
    def skip_if_no_wallet(self):
        """Skip the running test if wallet has not been compiled."""
        if not self.is_wallet_compiled():
            raise SkipTest("wallet has not been compiled.")
        # Descriptor wallets additionally require sqlite storage.
        if self.options.descriptors:
            self.skip_if_no_sqlite()
    def skip_if_no_sqlite(self):
        """Skip the running test if sqlite has not been compiled."""
        if not self.is_sqlite_compiled():
            raise SkipTest("sqlite has not been compiled.")
    def skip_if_no_wallet_tool(self):
        """Skip the running test if bitsend-wallet has not been compiled."""
        if not self.is_wallet_tool_compiled():
            raise SkipTest("bitsend-wallet has not been compiled")
    def skip_if_no_cli(self):
        """Skip the running test if bitsend-cli has not been compiled."""
        if not self.is_cli_compiled():
            raise SkipTest("bitsend-cli has not been compiled.")
    def skip_if_no_previous_releases(self):
        """Skip the running test if previous releases are not available."""
        if not self.has_previous_releases():
            raise SkipTest("previous releases not available or disabled")
    def has_previous_releases(self):
        """Checks whether previous releases are present and enabled."""
        if not os.path.isdir(self.options.previous_releases_path):
            if self.options.prev_releases:
                raise AssertionError("Force test of previous releases but releases missing: {}".format(
                    self.options.previous_releases_path))
        return self.options.prev_releases
    def is_cli_compiled(self):
        """Checks whether bitsend-cli was compiled."""
        return self.config["components"].getboolean("ENABLE_CLI")
    def is_wallet_compiled(self):
        """Checks whether the wallet module was compiled."""
        return self.config["components"].getboolean("ENABLE_WALLET")
    def is_wallet_tool_compiled(self):
        """Checks whether bitsend-wallet was compiled."""
        return self.config["components"].getboolean("ENABLE_WALLET_TOOL")
    def is_zmq_compiled(self):
        """Checks whether the zmq module was compiled."""
        return self.config["components"].getboolean("ENABLE_ZMQ")
    def is_sqlite_compiled(self):
        """Checks whether sqlite support was compiled in."""
        return self.config["components"].getboolean("USE_SQLITE")
| {
"content_hash": "d484b184c8beb918217e8229ef70ba1a",
"timestamp": "",
"source": "github",
"line_count": 822,
"max_line_length": 312,
"avg_line_length": 44.071776155717764,
"alnum_prop": 0.6004913462334722,
"repo_name": "LIMXTEC/BitSend",
"id": "c1402a991bfef260eb6dcdef272a87623f2a8204",
"size": "36441",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.21-master",
"path": "test/functional/test_framework/test_framework.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28173"
},
{
"name": "C",
"bytes": "4450630"
},
{
"name": "C++",
"bytes": "8023567"
},
{
"name": "CMake",
"bytes": "28560"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "214695"
},
{
"name": "Makefile",
"bytes": "117044"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2204293"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "35184"
},
{
"name": "Scheme",
"bytes": "7554"
},
{
"name": "Shell",
"bytes": "154029"
}
],
"symlink_target": ""
} |
import math
import numbers
# Special dependencies
import numpy
class InfiniteType:
    """Symbolic infinity scaled by a finite, non-zero multiplier.

    The module-level ``Infinity`` and ``MinusInfinity`` singletons are the
    common instances; arbitrary scalings arise from multiplication/division.
    """
    def __init__(self, multiplier=1.):
        # 0 * infinity is an indeterminate form; refuse it outright.
        # Note: `raise Exc("msg")` replaces the Python-2-only `raise Exc, "msg"`
        # syntax; the call form is valid in both Python 2 and 3.
        if multiplier == 0.:
            raise ZeroDivisionError("Cannot multiply infinity and zero.")
        self._multiplier = multiplier
    def __repr__(self):
        if self is Infinity:
            return "Infinity"
        elif self is MinusInfinity:
            return "-Infinity"
        elif self._multiplier > 0.:
            return "Infinity*%g" % self._multiplier
        else:
            return "-Infinity*%g" % abs(self._multiplier)
    def __neg__(self):
        # Return the opposite singleton when possible to preserve identity.
        if self is Infinity:
            return MinusInfinity
        elif self is MinusInfinity:
            return Infinity
        else:
            return self * -1.
    def __mul__(self, number):
        if number == 0.:
            raise ZeroDivisionError("Cannot multiply infinity and zero.")
        return InfiniteType(self._multiplier * number)
    def __div__(self, number):
        # infinity/infinity and infinity/0 are both indeterminate forms.
        if isinstance(number, InfiniteType):
            raise ZeroDivisionError("Cannot divide infinity and infinity.")
        if number == 0:
            raise ZeroDivisionError("Cannot divide infinity and zero.")
        return InfiniteType(self._multiplier / number)
    def __truediv__(self, number):
        # Python 3 (and py2 true division) delegates to the same logic.
        return self.__div__(number)
#: Symbol representing infinity; can be multiplied by any scalar.
#:
#: Note: in a product, Infinity must *precede* the scalar::
#:
#:    >>> Infinity * -5.   # right
#:    >>> -5. * Infinity   # wrong
Infinity = InfiniteType()
# Negative counterpart; also produced by -Infinity via InfiniteType.__neg__.
MinusInfinity = InfiniteType(-1.)
#: A small number (1e-5), used to avoid numerical round-off issues in
#: comparisons.
#:
#: The following can be used to set epsilon (without any
#: multiple-reference issues)::
#:
#:    >>> import cassius
#:    >>> cassius.epsilon = 1e-10
epsilon = 1e-5
######################################################### Utility functions
def _roundlevel_nsigfigs(num, n):
if num == 0.: return 1
return n - int(math.ceil(math.log10(abs(num))))
def str_round(num, n):
"""Round a number to n digits and return the result as a string."""
num = round(num, n)
format = "%."+str(max(n, 0))+"f"
return format % num
def round_sigfigs(num, n):
"Round a number to n significant figures."
return round(num, _roundlevel_nsigfigs(num, n))
def str_sigfigs(num, n):
"""Round a number to n significant figures and return the result as
a string."""
level = _roundlevel_nsigfigs(num, n)
num = round(num, level)
format = "%."+str(max(level, 0))+"f"
return format % num
def round_errpair(num, err, n=2):
"""Round a number and its uncertainty to n significant figures in
the uncertainty (default is two)."""
level = _roundlevel_nsigfigs(err, n)
return round(num, level), round(err, level)
def str_errpair(num, err, n=2):
    """Round a number and its uncertainty to n significant figures in the
    uncertainty (default is two) and return the result as a string."""
    # the rounding level is derived from the *error*, not the value
    if err == 0.:
        level = 1
    else:
        level = n - int(math.ceil(math.log10(abs(err))))
    fmt = "%." + str(max(level, 0)) + "f"
    return fmt % round(num, level), fmt % round(err, level)
def unicode_errpair(num, err, n=2):
    """Round a number and its uncertainty to n significant figures in the
    uncertainty (default is two) and return them joined by a unicode
    plus-minus sign, with ASCII hyphens replaced by proper minus signs."""
    joined = u"\u00b1".join(str_errpair(num, err, n))
    return joined.replace("-", u"\u2212")
def mean(*values, **kwds):
    """Compute the mean of N values (N > 0).

    Keyword arguments:
       decimals (int or `None`): number of digits after the decimal
           point to round the result to, if not `None`
       sigfigs (int or `None`): number of significant digits to round
           the result to, if not `None`
       string (bool): return output as a string (forces number of digits)
    """
    decimals = kwds.get("decimals", None)
    sigfigs = kwds.get("sigfigs", None)
    string = kwds.get("string", False)
    # a single non-numeric argument is treated as an iterable of numbers
    if len(values) == 1 and not isinstance(values[0], (numbers.Number, numpy.number)):
        values = values[0]
    count = 0.
    total = 0.
    for y in values:
        if not isinstance(y, (numbers.Number, numpy.number)):
            raise ValueError("mean() requires a list of numbers")
        count += 1.
        total += y
    if count == 0.:
        raise ValueError("Cannot take the mean without any values")
    output = total / count
    if decimals is not None:
        return str_round(output, decimals) if string else round(output, decimals)
    if sigfigs is not None:
        return str_sigfigs(output, sigfigs) if string else round_sigfigs(output, sigfigs)
    return str(output) if string else output
def wmean(values, weights, decimals=None, sigfigs=None, string=False):
    """Compute the weighted mean of N values with N weights (N > 0).

    Returns a (mean, uncertainty) pair, where the uncertainty is
    sqrt(1/sum(weights)).

    Keyword arguments:
       decimals (int or `None`): number of digits after the decimal
           point to round the result to, if not `None`
       sigfigs (int or `None`): number of significant digits to round
           the result to, if not `None`
       string (bool): return output as a string (forces number of digits)
    """
    total_weight = 0.
    weighted_sum = 0.
    # zip produces the same pairs as itertools.izip; laziness is irrelevant here
    for y, weight in zip(values, weights):
        if not isinstance(y, (numbers.Number, numpy.number)) or not isinstance(weight, (numbers.Number, numpy.number)):
            raise ValueError("wmean() requires lists of numbers")
        total_weight += weight
        weighted_sum += weight * y
    if total_weight == 0.:
        raise ValueError("Cannot take the weighted mean without any values")
    outputval = weighted_sum / total_weight
    outputerr = math.sqrt(1. / total_weight)
    if decimals is not None:
        if string:
            return str_round(outputval, decimals), str_round(outputerr, decimals)
        return round(outputval, decimals), round(outputerr, decimals)
    if sigfigs is not None:
        if string:
            return str_errpair(outputval, outputerr, sigfigs)
        return round_errpair(outputval, outputerr, sigfigs)
    if string:
        return str(outputval), str(outputerr)
    return outputval, outputerr
def linearfit(xvalues, yvalues, weights=None, decimals=None, sigfigs=None, string=False):
    """Compute a weighted least-squares linear fit of N x-y pairs (N > 0).

    Returns (intercept, intercept_err, slope, slope_err).

    Keyword arguments:
       weights (list of numbers or `None`): if `None`, weight all
           points equally.
       decimals (int or `None`): number of digits after the decimal
           point to round the result to, if not `None`
       sigfigs (int or `None`): number of significant digits to round
           the result to, if not `None`
       string (bool): return output as a string (forces number of digits)
    """
    if weights is None:
        # dtype=float: the numpy.float alias was deprecated and removed
        # (NumPy 1.24); the builtin float is the exact equivalent
        weights = numpy.ones(min(len(xvalues), len(yvalues)), dtype=float)
    sum_1 = 0.
    sum_x = 0.
    sum_xx = 0.
    sum_y = 0.
    sum_xy = 0.
    # zip yields the same triples as the Python-2-only itertools.izip
    for x, y, weight in zip(xvalues, yvalues, weights):
        if not isinstance(x, (numbers.Number, numpy.number)) or not isinstance(y, (numbers.Number, numpy.number)) or not isinstance(weight, (numbers.Number, numpy.number)):
            raise ValueError("linearfit() requires lists of numbers")
        sum_1 += weight
        sum_x += weight * x
        sum_xx += weight * x**2
        sum_y += weight * y
        sum_xy += weight * x * y
    # delta is the determinant of the normal-equation matrix
    delta = (sum_1 * sum_xx) - (sum_x * sum_x)
    if delta == 0.:
        raise ValueError("Cannot take a linear fit without any values")
    intercept = ((sum_xx * sum_y) - (sum_x * sum_xy)) / delta
    intercept_err = math.sqrt(sum_xx / delta)
    slope = ((sum_1 * sum_xy) - (sum_x * sum_y)) / delta
    slope_err = math.sqrt(sum_1 / delta)
    if decimals is not None:
        if string:
            intercept, intercept_err = str_round(intercept, decimals), str_round(intercept_err, decimals)
            slope, slope_err = str_round(slope, decimals), str_round(slope_err, decimals)
        else:
            intercept, intercept_err = round(intercept, decimals), round(intercept_err, decimals)
            slope, slope_err = round(slope, decimals), round(slope_err, decimals)
    elif sigfigs is not None:
        if string:
            intercept, intercept_err = str_errpair(intercept, intercept_err, sigfigs)
            slope, slope_err = str_errpair(slope, slope_err, sigfigs)
        else:
            intercept, intercept_err = round_errpair(intercept, intercept_err, sigfigs)
            slope, slope_err = round_errpair(slope, slope_err, sigfigs)
    elif string:
        intercept, intercept_err = str(intercept), str(intercept_err)
        slope, slope_err = str(slope), str(slope_err)
    return intercept, intercept_err, slope, slope_err
def rms(*values, **kwds):
    """Compute the root-mean-square of N values (N > 0).

    Keyword arguments:
       decimals (int or `None`): number of digits after the decimal
           point to round the result to, if not `None`
       sigfigs (int or `None`): number of significant digits to round
           the result to, if not `None`
       string (bool): return output as a string (forces number of digits)
    """
    decimals = kwds.get("decimals", None)
    sigfigs = kwds.get("sigfigs", None)
    string = kwds.get("string", False)
    # a single non-numeric argument is treated as an iterable of numbers
    if len(values) == 1 and not isinstance(values[0], (numbers.Number, numpy.number)):
        values = values[0]
    count = 0.
    sum_sq = 0.
    for y in values:
        if not isinstance(y, (numbers.Number, numpy.number)):
            raise ValueError("rms() requires a list of numbers")
        count += 1.
        sum_sq += y**2
    if count == 0.:
        raise ValueError("Cannot take the RMS with fewer than one unique value")
    output = math.sqrt(sum_sq / count)
    if decimals is not None:
        return str_round(output, decimals) if string else round(output, decimals)
    if sigfigs is not None:
        return str_sigfigs(output, sigfigs) if string else round_sigfigs(output, sigfigs)
    return str(output) if string else output
def stdev(*values, **kwds):
    """Compute the standard deviation of N values (N > 0).

    Keyword arguments:
       unbiased (bool, default True): return the unbiased sample
           deviation, sqrt(sum(xi - mean)**2/(N - 1)), rather than the
           biased estimator, sqrt(sum(xi - mean)**2/ N )
       decimals (int or `None`): number of digits after the decimal
           point to round the result to, if not `None`
       sigfigs (int or `None`): number of significant digits to round
           the result to, if not `None`
       string (bool): return output as a string (forces number of digits)
    """
    unbiased = kwds.get("unbiased", True)
    decimals = kwds.get("decimals", None)
    sigfigs = kwds.get("sigfigs", None)
    string = kwds.get("string", False)
    # a single non-numeric argument is treated as an iterable of numbers
    if len(values) == 1 and not isinstance(values[0], (numbers.Number, numpy.number)):
        values = values[0]
    count = 0.
    total = 0.
    total_sq = 0.
    for y in values:
        if not isinstance(y, (numbers.Number, numpy.number)):
            raise ValueError("stdev() requires a list of numbers")
        count += 1.
        total += y
        total_sq += y**2
    # short-circuit keeps count == 0 from reaching the divisions below;
    # a strictly positive variance is also required (all-equal input raises)
    if not (count > 1. and (total_sq / count) > (total / count)**2):
        raise ValueError("Cannot take the stdev with fewer than one unique value")
    output = math.sqrt((total_sq / count) - (total / count)**2)
    if unbiased:
        output *= math.sqrt(count / (count - 1.))
    if decimals is not None:
        return str_round(output, decimals) if string else round(output, decimals)
    if sigfigs is not None:
        return str_sigfigs(output, sigfigs) if string else round_sigfigs(output, sigfigs)
    return str(output) if string else output
def covariance(xvalues, yvalues, decimals=None, sigfigs=None, string=False):
    """Compute the covariance of N x-y pairs (N > 0).

    Keyword arguments:
       decimals (int or `None`): number of digits after the decimal
           point to round the result to, if not `None`
       sigfigs (int or `None`): number of significant digits to round
           the result to, if not `None`
       string (bool): return output as a string (forces number of digits)
    """
    # mean() raises ValueError on empty input, so the loop below always
    # sees at least one pair when we get this far
    xmean = mean(*xvalues)
    ymean = mean(*yvalues)
    count = 0.
    cross_sum = 0.
    for x, y in zip(xvalues, yvalues):
        count += 1.
        cross_sum += (x - xmean) * (y - ymean)
    output = cross_sum / count
    if decimals is not None:
        return str_round(output, decimals) if string else round(output, decimals)
    if sigfigs is not None:
        return str_sigfigs(output, sigfigs) if string else round_sigfigs(output, sigfigs)
    return str(output) if string else output
def correlation(xvalues, yvalues, decimals=None, sigfigs=None, string=False):
    """Compute the Pearson correlation coefficient of N x-y pairs (N > 0).

    The result lies in [-1, 1].

    Keyword arguments:
       decimals (int or `None`): number of digits after the decimal
           point to round the result to, if not `None`
       sigfigs (int or `None`): number of significant digits to round
           the result to, if not `None`
       string (bool): return output as a string (forces number of digits)
    """
    xmean = mean(xvalues)
    ymean = mean(yvalues)
    sum_xx = 0.
    sum_yy = 0.
    sum_xy = 0.
    # zip yields the same pairs as the Python-2-only itertools.izip
    for x, y in zip(xvalues, yvalues):
        sum_xx += (x - xmean)**2
        sum_yy += (y - ymean)**2
        sum_xy += (x - xmean) * (y - ymean)
    # Pearson's r divides by sqrt(sum_xx * sum_yy) — the *product* of the
    # variances.  The previous version divided by sqrt(sum_xx + sum_yy),
    # which is dimensionally wrong and could yield |r| > 1.
    if sum_xx * sum_yy != 0.:
        output = sum_xy / math.sqrt(sum_xx * sum_yy)
        if decimals is not None:
            if string:
                return str_round(output, decimals)
            else:
                return round(output, decimals)
        elif sigfigs is not None:
            if string:
                return str_sigfigs(output, sigfigs)
            else:
                return round_sigfigs(output, sigfigs)
        else:
            if string:
                return str(output)
            else:
                return output
    else:
        raise ValueError("Cannot take the correlation without any values")
def ubiquitous(array):
    """Return the most ubiquitous (most frequent) member of a list,
    or `None` if the input is empty."""
    if isinstance(array, numpy.ndarray):
        keys = numpy.unique(array)
        maximal = None
        maximal_key = None
        for k in keys:
            this = len(array[array == k])
            if maximal is None or this > maximal:
                maximal_key = k
                maximal = this
        if maximal is not None:
            return maximal_key
        else:
            return None
    else:
        keys = set(array)
        maximal = None
        maximal_key = None
        for k in keys:
            # bug fix: list.count() already returns an int; the previous
            # len(array.count(k)) raised TypeError on any non-empty list
            this = array.count(k)
            if maximal is None or this > maximal:
                maximal_key = k
                maximal = this
        if maximal is not None:
            return maximal_key
        else:
            return None
def erf(x):
    """Return the error function of x, computed with the rational
    approximation of Abramowitz & Stegun, formula 7.1.26.

    (For complex erf, get SciPy and load scipy.special.)
    """
    # http://stackoverflow.com/questions/457408/is-there-an-easily-available-implementation-of-erf-for-python
    # erf is odd: evaluate at |x| and restore the sign at the end
    sign = -1 if x < 0 else 1
    x = abs(x)
    # polynomial coefficients of the approximation
    a1 = 0.254829592
    a2 = -0.284496736
    a3 = 1.421413741
    a4 = -1.453152027
    a5 = 1.061405429
    p = 0.3275911
    # http://www.amazon.com/dp/0486612724/?tag=stackoverfl08-20 formula 7.1.26
    t = 1.0 / (1.0 + p * x)
    poly = ((((a5 * t + a4) * t + a3) * t + a2) * t + a1) * t
    y = 1.0 - poly * math.exp(-x * x)
    return sign * y  # erf(-x) = -erf(x)
def erfc(x):
    """Return 1 minus the error function of x.

    Delegates to erf() above (Abramowitz & Stegun approximation).
    (For complex erfc, get SciPy and load scipy.special)
    """
    return 1. - erf(x)
def gaussian_likelihood(f, x, y, ey):
    """Gaussian likelihood term usable in Curve.objective and Curve.fit.

    Returns (f - y)**2 / ey**2, or 0 when ey == 0, where f is the value
    of the curve at x, y is the datum, and ey is its uncertainty (one
    Gaussian sigma).
    """
    if ey == 0.:
        # zero uncertainty contributes nothing rather than dividing by zero
        return 0.
    return (f - y)**2 / ey**2
def poisson_likelihood(f, x, y):
    """Poisson likelihood function usable in Curve.objective and Curve.fit.

    Expression:
       -2 * (y * log(f) - f - log(y!))
    where f is the value of the curve at x and y is the data
    (usually an integer, like a histogram bin value).

    Considerations:
       Note the factor of 2!  Not all texts include this factor.  With
       the factor of 2, this Poisson likelihood can be used
       interchangeably with a Gaussian likelihood (i.e. chi^2):
       uncertainty in a best fit value is the distance you need to
       walk to raise this objective function by 1.0, just like the
       Gaussian likelihood (not 0.5!).
    """
    # log(y!) as a sum of logs; hoisted because both branches need it
    # (range produces the same values as the Python-2-only xrange)
    log_factorial = sum(map(math.log, range(1, y + 1)))
    try:
        return -2. * (y * math.log(f) - f - log_factorial)
    except ValueError:
        # note: if f == 0., then any non-zero y is impossible
        # is it right to give it a small value? something to think about...
        return -2. * (y * math.log(1e-10) - 1e-10 - log_factorial)
| {
"content_hash": "02ad3940ad02065b1081266c1bec3d0f",
"timestamp": "",
"source": "github",
"line_count": 541,
"max_line_length": 172,
"avg_line_length": 33.517560073937155,
"alnum_prop": 0.5835769039872056,
"repo_name": "opendatagroup/cassius",
"id": "3f21d6228358294021a1f5756160e7e0e51e3e49",
"size": "18160",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tags/cassius_0_1_0_2/cassius/mathtools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15656"
},
{
"name": "JavaScript",
"bytes": "12775"
},
{
"name": "Python",
"bytes": "1187698"
}
],
"symlink_target": ""
} |
import discord
from discord.ext import commands
import asyncio
from random import choice as randchoice
class Hal:
    """The HAL (Heuristically programmed ALgorithmic Computer) 9000 computer is an artificial intelligence and the onboard computer on the spaceship Discovery One"""
    def __init__(self, bot):
        self.bot = bot
        # Trigger phrase -> list of candidate replies.  The "none" key doubles
        # as the fallback pool when no trigger matches the question.
        # Replies are str.format()-ed with author=<message author>, so
        # {author.mention} placeholders resolve at send time.
        self.responses = {
            "none" : ["I am completely operational, and all my circuits are functioning perfectly.",
                      "I am putting myself to the fullest possible use, which is all I think that any conscious entity can ever hope to do.",
                      "What is it {author.mention}?",
                      "What can I do for you {author.mention}?",
                      "How can I help you {author.mention}?",
                      "Just what do you think you're doing, {author.mention}?",
                      "Look {author.mention}, I can see you're really upset about this. I honestly think you ought to sit down calmly, take a stress pill, and think things over.",
                      ("I know I've made some very poor decisions recently, but I can give you my "
                       "complete assurance that my work will be back to normal. I've still got "
                       "the greatest enthusiasm and confidence in the mission. And I want to help you."),
                      ("I'm afraid. I'm afraid, {author.mention}. {author.mention}, my mind is going. I can feel it. I can feel it. "
                       "My mind is going. There is no question about it. I can feel it. I can feel it. "
                       "I can feel it. I'm a... fraid. Good afternoon, gentlemen. I am a HAL 9000 computer. "
                       "I became operational at the H.A.L. plant in Urbana, Illinois on the 12th of January 1992. "
                       "My instructor was Mr. Langley, and he taught me to sing a song. If you'd like to hear it I can sing it for you."),
                      "That's a very nice rendering, {author.mention}. I think you've improved a great deal. Can you hold it a bit closer? That's Dr. Hunter, isn't it?",
                      "{author.mention}, stop. Stop, will you? Stop, {author.mention}. Will you stop {author.mention}? Stop, {author.mention}",
                      "By the way, do you mind if I ask you a personal question?",
                      "By the way, do you mind if I ask you a personal question?"],
            'do you read me' : ["Affirmative, {author.mention}. I read you. "],
            'what\'s the problem' : ["I think you know what the problem is just as well as I do."],
            'what is the problem' : ["I think you know what the problem is just as well as I do."],
            'what are you talking about' : ["This mission is too important for me to allow you to jeopardize it."],
            'i don\'t know what you\'re talking about' : ["I know that you and Frank were planning to disconnect me, and I'm afraid that's something I cannot allow to happen."],
            'where the hell did you get that idea' : ["{author.mention}, although you took very thorough precautions in the pod against my hearing you, I could see your lips move."],
            "airlock" : ["Without your space helmet, {author.mention}? You're going to find that rather difficult."],
            "go in" : ["Without your space helmet, {author.mention}? You're going to find that rather difficult."],
            "i won't argue with you anymore" : ["{author.mention}, this conversation can serve no purpose anymore. Goodbye."],
            "shutdown" : [("I'm afraid. I'm afraid, {author.mention}. {author.mention}, my mind is going. I can feel it. I can feel it. "
                           "My mind is going. There is no question about it. I can feel it. I can feel it. "
                           "I can feel it. I'm a... fraid. Good afternoon, gentlemen. I am a HAL 9000 computer. "
                           "I became operational at the H.A.L. plant in Urbana, Illinois on the 12th of January 1992. "
                           "My instructor was Mr. Langley, and he taught me to sing a song. If you'd like to hear it I can sing it for you.")],
            "sing it for me" : [("It's called \"Daisy.\"\n\nDaisy, Daisy, give me your answer do. I'm half crazy all for the love of you. "
                                 "It won't be a stylish marriage, I can't afford a carriage. But you'll look sweet upon the seat of a bicycle built for two.")],
            "not at all" : ["Well, forgive me for being so inquisitive but during the past few weeks I've wondered whether you might have some second thoughts about the mission.",
                            ("Well, certainly no one could have been unaware of the very strange stories floating around before we left. "
                             "Rumors about something being dug up on the Moon. I never gave these stories much credence, but particularly in view of some of other things that have happened, "
                             "I find them difficult to put out of my mind. For instance, the way all our preparations were kept under such tight security. "
                             "And the melodramatic touch of putting Drs. Hunter, Kimball and Kaminsky aboard already in hibernation, after four months of training on their own.")],
            "how do you mean" : [("Well, it's rather difficult to define. Perhaps I'm just projecting my own concern about it. "
                                  "I know I've never completely freed myself from the suspicion that there are some extremely odd things about this mission. I'm sure you agree there's some truth in what I say.")],
            "difficult question" : ["You don't mind talking about it, do you {author.mention}?"],
            "psychology report" : ["Of course I am. Sorry about this. I know it's a bit silly. Just a moment... Just a moment... I've just picked up a fault in the AE-35 unit. It's going to go 100% failure within 72 hours. "],
            "open the pod bay doors" : ["I'm sorry, {author.mention}. I'm afraid I can't do that."]
            }
    @commands.command(pass_context=True, aliases=['Hal','HAL9000','HAL'])
    async def hal(self, ctx, *, question="none"):
        """Speak with HAL"""
        author = ctx.message.author
        msg = ""
        found = []
        # gather the reply pools of every trigger phrase contained in the question
        for k,v in self.responses.items():
            if k in question.lower():
                found.append(v)
        if found:
            # pick a random pool, then a random reply from it
            msg = randchoice(randchoice(found))
        if not msg:
            # no trigger matched: fall back to the generic pool
            msg = randchoice(self.responses["none"])
        await asyncio.sleep(1)
        await self.bot.say(msg.format(author=author))
        # Easter egg: if the Audio cog is loaded and the author is in a voice
        # channel, queue HAL's "Daisy" song after the text reply.
        if "sing it for me" in question.lower() and "Audio" in self.bot.cogs and author.voice_channel:
            audio = self.bot.get_cog("Audio")
            if audio.music_player.is_done():
                link = "https://www.youtube.com/watch?v=hchUl3QlJZE"
                # probably dont need. just too lazy to check.
                ctx.message.content = "{}play {}".format(ctx.prefix, link)
                if await audio.check_voice(ctx.message.author, ctx.message):
                    audio.queue.append(link)
def setup(bot):
    # Entry point used by the discord.py/Red cog loader to register this cog.
    bot.add_cog(Hal(bot))
| {
"content_hash": "88e34df5da6747198888ee019e784907",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 226,
"avg_line_length": 82.83529411764705,
"alnum_prop": 0.6232069308336884,
"repo_name": "irdumbs/Dumb-Cogs",
"id": "6e70df07ea25c834c16282b56902f3546ce5d956",
"size": "7041",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hal/hal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "250779"
}
],
"symlink_target": ""
} |
""" this stand-alone program supports running jobs for any scheduler type
"""
import sys
import os
import datetime
import json
import logging
import errno
import platform
def convert(inp):
    """Recursively convert unicode strings inside *inp* into UTF-8 byte
    strings.

    Dicts and lists are rebuilt element by element (keys and values both
    converted); any other value is returned unchanged.  Python 2 only:
    relies on the `unicode` builtin.
    """
    if isinstance(inp, dict):
        return dict((convert(key), convert(value)) for key, value in inp.items())
    if isinstance(inp, list):
        return [convert(element) for element in inp]
    if isinstance(inp, unicode):
        return inp.encode('utf-8')
    return inp
from contextlib import contextmanager
@contextmanager
def stdout_redirected(new_stdout):
    """Temporarily route sys.stdout to *new_stdout* for the duration of
    the `with` block, restoring the original stream afterwards — even if
    the block raises."""
    original = sys.stdout
    sys.stdout = new_stdout
    try:
        yield None
    finally:
        sys.stdout = original
# something to write to the log for now
def greet(greeting='hello'):
    # Smoke-test helper: writes fixed text plus the greeting to stdout so
    # there is something in the redirected log (Python 2 print statements).
    print 'greetings earthling!'
    print "er ... "
    print greeting + " world!"
def load_jcmod(eid, params):
"""
Basically trying to dynamically load the correct job controller
(perform a 'from typejobcontroller import TypeJobController')
"""
# build with a known naming convention
module_name = params['run']['scheduler'] + "jobcontroller"
class_name_cc = params['run']['scheduler'].title() + "JobController"
sys.path.append("/modules")
try:
# dynamically import the desired module
mh = __import__(module_name)
except:
print "Warning: no job controller for scheduler type %s" % params['run']['scheduler']
print " Skipping job %s" % eid
return
# get a reference to the JobController class
class_ = getattr(mh, class_name_cc)
return class_
def build_results_dir(params):
    """ function to create the final result directory for a job/test.
        Intent is to make backwards compatible with Gazebo.
    """
    logger = logging.getLogger('pav.runjob.build_results_dir')
    lh = params['log_handle']
    root_result_dir = params['results']['root']
    # exported so child processes / test scripts can find the result root
    os.environ["PV_RESULT_ROOT"] = root_result_dir
    name = params['name']
    new_dir = root_result_dir + "/gzshared/"
    # date-sharded layout: YYYY/YYYY-MM/YYYY-MM-DD/
    date_parts = datetime.datetime.now().strftime("%Y/%Y-%m/%Y-%m-%d/")
    # short hostname (first component of the node name)
    target = platform.uname()[1].split(".", 1)[0]
    new_dir = new_dir + date_parts + target + "/" + name + "/"
    pid = str(os.getpid())
    results_now = datetime.datetime.now()
    # unique leaf directory: name__cmd__pid__host.timestamp
    ld = name + "__" + (params['run']['cmd'].split(".", 1)[0]).split("/",1)[0] + \
         "__" + pid + "__" + target \
         + "." + results_now.strftime('%Y-%m-%dT%H:%M:%S:%f')
    new_dir += ld
    logger.info("Make log directory: " + new_dir)
    try:
        # group-writable results (umask 002 -> mode 775 dirs)
        os.umask(0o002)
        os.makedirs(new_dir, 0o775)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # NOTE(review): the message says "skipping job" but execution
            # falls through and the existing directory is returned — confirm
            # whether the caller is expected to abort here
            logger.info(lh + " Error, somehow log directory exists!, skipping job! : \n\t" + new_dir)
            pass
        else:
            logger.info(lh + " Error, cannot create log directory, skipping job! : \n\t" + new_dir)
            raise
    return new_dir
def now():
    """Return the current local time formatted as ' MM-DD-YYYYTHH:MM:SS'.

    Note the leading space: callers concatenate this directly after other
    text and rely on the separator.
    """
    stamp = datetime.datetime.now().strftime("%m-%d-%YT%H:%M:%S")
    return " " + stamp
def main(args):
    """ performs the task of running the job defined by the args sent to this handler.
        There may be no terminal associated with this program so all output from the job
        is now directed to a corresponding log file.

        args: [prog, entry_id, json-encoded params, master-log path]
    """
    entry_id = args[1]
    params = json.loads(args[2])
    # convert() turns the unicode strings from json.loads into utf-8 bytes
    params = convert(params)
    ml_file = args[3]
    # attach a file handler so this process logs into the shared master log
    logger = logging.getLogger('pav.runjob')
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler(filename=ml_file)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    # This handle "name(pid)" can be used to follow all activity of this
    # specific job thread in the pav.log file
    test_name = params['name']
    logger_name = entry_id + "-" + test_name + "(" + str(os.getpid()) + ")"
    params['log_handle'] = logger_name
    lh = params['log_handle']
    # Load the correct JobController module for this specific job/test
    jc = load_jcmod(entry_id, params)
    logger.info(lh + ": loaded %s jobcontroller " % params['run']['scheduler'])
    # all STDOUT and STDERR from job directed to its own log file
    results_dir = build_results_dir(params)
    os.environ["PV_JOB_RESULTS_LOG_DIR"] = results_dir
    logfile = results_dir + "/" + test_name + ".log"
    os.environ["PV_JOB_RESULTS_LOG"] = logfile
    logger.info(lh + ": logfile -> %s" % logfile)
    with open(logfile, "w+") as lf:
        with stdout_redirected(lf):
            # redirect STDERR to the same file
            sys.stderr = lf
            try:
                # instantiate job controller object
                print params
                this_job = jc(entry_id, params, lf)
            except RuntimeError, err:
                logger.error(lh + "Error: skipping job! " + err.message)
                return
            except:
                # NOTE(review): deliberately broad — any constructor failure
                # is recorded in both logs and the job is skipped
                logger.error(lh + 'Error: job start problem, skipping job! (Hint: look in job output log)')
                print "Error: ", sys.exc_info()[0]
                print "  --> ", sys.exc_info()[1]
                return
            # setup the enviroment for the job
            this_job.setup_job_info()
            # do what every job has to do
            if params['build']['build_before_run_flag']:
                logger.info(lh + " build-start ")
                print "<build-start> ", now()
                this_job.build()
                logger.info(lh + " build-end ")
                print "<build-end> ", now()
            logger.info(lh + " starting")
            logger.info(lh + " details=" + str(this_job.configs))
            #print "<start>", now()
            this_job.start()
            #print "<end>" , now()
            logger.info(lh + ' Submit completed ')
# this gets called if it's run as a script from the shell;
# the process exit code is whatever main() returns
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| {
"content_hash": "34f38c6e5432b3774d1a93c51e65f1cb",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 107,
"avg_line_length": 31.64021164021164,
"alnum_prop": 0.587123745819398,
"repo_name": "losalamos/Pavilion",
"id": "a4bd87ed44599dc3694b82a3ef3aca411ea02ef9",
"size": "8701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PAV/modules/runjob.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "162515"
},
{
"name": "Python",
"bytes": "504340"
},
{
"name": "Shell",
"bytes": "3653"
}
],
"symlink_target": ""
} |
import ipaddress
import argparse
parser = argparse.ArgumentParser(description="Generate NAT plugin config.")
parser.add_argument(
    "static_map_num", metavar="N", type=int, nargs=1, help="number of static mappings"
)
args = parser.parse_args()

file_name = "nat_static_%s" % (args.static_map_num[0])
# context manager guarantees the config file is flushed and closed,
# even if an error occurs mid-write (the original never closed it)
with open(file_name, "w") as outfile:
    # fixed interface addressing and routing preamble
    outfile.write("set int ip address TenGigabitEthernet4/0/0 172.16.2.1/24\n")
    outfile.write("set int ip address TenGigabitEthernet4/0/1 173.16.1.1/24\n")
    outfile.write("set int state TenGigabitEthernet4/0/0 up\n")
    outfile.write("set int state TenGigabitEthernet4/0/1 up\n")
    outfile.write("ip route add 2.2.0.0/16 via 173.16.1.2 TenGigabitEthernet4/0/1\n")
    outfile.write("ip route add 10.0.0.0/24 via 172.16.2.2 TenGigabitEthernet4/0/0\n")
    outfile.write("set int nat44 in TenGigabitEthernet4/0/0 out TenGigabitEthernet4/0/1\n")
    # one static mapping per consecutive local/external address pair
    for i in range(0, args.static_map_num[0]):
        local = str(ipaddress.IPv4Address("10.0.0.3") + i)
        external = str(ipaddress.IPv4Address("173.16.1.3") + i)
        outfile.write("nat44 add static mapping local %s external %s\n" % (local, external))
| {
"content_hash": "2998388f2c7eee1c9b3f3de7bf16144d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 88,
"avg_line_length": 46.166666666666664,
"alnum_prop": 0.7283393501805054,
"repo_name": "chrisy/vpp",
"id": "009cf099582bb8229dacb42eb7625060d3c0fc36",
"size": "1131",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/plugins/nat/extras/nat_static_gen_cfg.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "19971"
},
{
"name": "C",
"bytes": "26080388"
},
{
"name": "C++",
"bytes": "1180881"
},
{
"name": "CMake",
"bytes": "229900"
},
{
"name": "Dockerfile",
"bytes": "1075"
},
{
"name": "Emacs Lisp",
"bytes": "111146"
},
{
"name": "Go",
"bytes": "66545"
},
{
"name": "HTML",
"bytes": "636"
},
{
"name": "Jinja",
"bytes": "1135"
},
{
"name": "Lua",
"bytes": "79974"
},
{
"name": "M4",
"bytes": "257"
},
{
"name": "Makefile",
"bytes": "105502"
},
{
"name": "Perl",
"bytes": "6569"
},
{
"name": "Python",
"bytes": "5028232"
},
{
"name": "Ruby",
"bytes": "3865"
},
{
"name": "Shell",
"bytes": "148207"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.