text stringlengths 4 1.02M | meta dict |
|---|---|
#!/usr/bin/python
#
# Generate build parameter files based on build information.
# A C header is generated for C code, and a JSON file for
# build scripts etc which need to know the build config.
#
import os
import sys
import json
import optparse
import dukutil
if __name__ == '__main__':
    # Parse build metadata passed in by the build scripts.
    parser = optparse.OptionParser()
    parser.add_option('--version', dest='version')
    parser.add_option('--build', dest='build')
    parser.add_option('--out-json', dest='out_json')
    parser.add_option('--out-header', dest='out_header')
    (opts, args) = parser.parse_args()

    t = { 'version': opts.version, 'build': opts.build }

    # JSON build parameters for build scripts which need the build config.
    f = open(opts.out_json, 'wb')
    try:
        f.write(dukutil.json_encode(t).encode('ascii'))
    finally:
        f.close()

    # C header with the same information for C code.
    f = open(opts.out_header, 'wb')
    try:
        f.write('#ifndef DUK_BUILDPARAMS_H_INCLUDED\n')
        f.write('#define DUK_BUILDPARAMS_H_INCLUDED\n')
        f.write('/* automatically generated by genbuildparams.py, do not edit */\n')
        f.write('\n')
        # Fixed: this comment line previously lacked a trailing newline, so
        # the following #define ended up on the same line as the comment.
        f.write('/* DUK_VERSION is defined in duktape.h */\n')
        f.write('#define DUK_BUILD "%s"\n' % opts.build)
        f.write('\n')
        f.write('#endif /* DUK_BUILDPARAMS_H_INCLUDED */\n')
    finally:
        f.close()
| {
"content_hash": "8c00b62ce648cbcbec224d0e9d3bc7de",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 28.846153846153847,
"alnum_prop": 0.6702222222222223,
"repo_name": "JoshEngebretson/duktape",
"id": "db448833b81d4fc90250b14a699c17c1183b4aa5",
"size": "1125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/genbuildparams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1972812"
},
{
"name": "C++",
"bytes": "20922"
},
{
"name": "CoffeeScript",
"bytes": "895"
},
{
"name": "JavaScript",
"bytes": "15926045"
},
{
"name": "Objective-C",
"bytes": "6054"
},
{
"name": "Python",
"bytes": "136104"
},
{
"name": "Shell",
"bytes": "12610"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from collections import OrderedDict
from .exceptions import (DatasetNotFound, InitException, ModelNotFound, WrongParameter)
from sklearn.externals import joblib
def _split_path(path):
""" A helper function that splits the path into a list
Parameters
----------
path : str
path to split
"""
path = os.path.normpath(path)
head, tail = os.path.split(path)
if not head:
return [tail]
elif head == path:
if tail:
return [head, tail]
else:
return [head]
else:
if not tail:
return _split_path(head)
else:
return _split_path(head) + [tail]
class PipelineFinder(OrderedDict):
    """Walk through the hierarchy of existing models
    and find the processing pipeline that terminates with the
    model having the given id

    Parameters
    ----------
    mid : str
        a unique model id
    cache_dir : str
        folder where models are saved
    ingestion_method : str, default='vectorizer'
        default ingestion method (one of ['vectorizer', 'parser'])
        unless email threading is used, this should be set to 'vectorizer'
    steps : OrderedDict, optional
        pre-computed processing steps used to seed the pipeline

    Returns
    -------
    result : OrderedDict
        the prior processing pipeline with as keys the processing step type
        and as values the model ids
    """
    def __init__(self, mid=None, cache_dir="/tmp/", ingestion_method='vectorizer', steps=None):
        self.ingestion_method = ingestion_method
        # Cache of models loaded from disk, keyed by model id.
        self._loaded_models = {}
        self.mid = mid
        if steps is None:
            steps = OrderedDict()
        self.cache_dir = self._normalize_cachedir(cache_dir)
        super(PipelineFinder, self).__init__(steps)
    @staticmethod
    def _normalize_cachedir(cache_dir):
        """ Normalize the cachedir path. This ensures that the cache_dir
        ends with "ediscovery_cache"
        """
        cache_dir = os.path.normpath(cache_dir)
        if 'ediscovery_cache' not in cache_dir:  # not very pretty
            cache_dir = os.path.join(cache_dir, "ediscovery_cache")
        return cache_dir
    @classmethod
    def by_id(cls, mid, cache_dir="/tmp/", ingestion_method='vectorizer'):
        """ Find a pipeline by id

        Parameters
        ----------
        mid : str
            a unique model id
        cache_dir : str
            folder where models are saved
        ingestion_method : str, default='vectorizer'
            default ingestion method (one of ['vectorizer', 'parser'])
            unless email threading is used, this should be set to 'vectorizer'

        Returns
        -------
        result : OrderedDict
            the prior processing pipeline with as keys the processing step type
            and as values the model ids
        """
        cache_dir = cls._normalize_cachedir(cache_dir)
        pipeline = cls(mid=mid, cache_dir=cache_dir,
                       ingestion_method=ingestion_method)
        cache_dir_base = os.path.dirname(cache_dir)
        # Flag used to break out of the nested directory walk below.
        _break_flag = False
        for root, subdirs, files in os.walk(cache_dir):
            root = os.path.relpath(root, cache_dir_base)
            for sdir in subdirs:
                path = os.path.join(root, sdir)
                path_hierarchy = _split_path(path)
                if len(path_hierarchy) % 2 == 1:
                    # the path is of the form
                    # ['ediscovery_cache']
                    # or ['ediscovery_cache', 'ce196de4c7de4e57', 'cluster']
                    # ignore it
                    continue
                if path_hierarchy[-1] == mid:
                    # found the model
                    _break_flag = True
                    break
            if _break_flag:
                break
        else:
            # for/else: runs only when the walk finished without a break,
            # i.e. no directory matching ``mid`` was found.
            raise ModelNotFound('Model id {} not found in {}!'.format(mid, cache_dir))
        if path_hierarchy[0] == 'ediscovery_cache':
            # Replace the cache folder name with the ingestion step name so
            # the hierarchy reads as consecutive (step_type, model_id) pairs.
            path_hierarchy[0] = ingestion_method
        else:
            raise ValueError('path_hierarchy should start with ediscovery_cache',
                             'this indicates a bug in the code')
        # Consume the hierarchy as consecutive (step_type, model_id) pairs.
        for idx in range(len(path_hierarchy)//2):
            key, val = path_hierarchy[2*idx], path_hierarchy[2*idx+1]
            if key in pipeline:
                raise NotImplementedError('The current PipelineFinder class does not support'
                                          'multiple identical processing steps'
                                          'duplicates of {} found!'.format(key))
            pipeline[key] = val
        return pipeline
    @property
    def parent(self):
        """ Make a new pipeline without the latest node """
        if len(self.keys()) <= 1:
            raise ValueError("Can't take the parent of a root node!")
        # create a copy
        steps = OrderedDict(self)
        # get all the steps except the last one
        steps.popitem(last=True)
        return PipelineFinder(mid=list(steps.values())[-1],
                              cache_dir=self.cache_dir,
                              ingestion_method=self.ingestion_method,
                              steps=steps)
    @property
    def data(self):
        """ Load the data provided by the last node of the pipeline """
        last_node = list(self.keys())[-1]
        ds_path = self.get_path(self[last_node])
        # NOTE(review): if the last node is neither 'vectorizer' nor 'lsi',
        # ``full_path`` is never assigned and this raises NameError --
        # confirm whether other step types can terminate a pipeline.
        if last_node == "vectorizer":
            full_path = os.path.join(ds_path, 'features')
        elif last_node == 'lsi':
            full_path = os.path.join(ds_path, 'data')
        return joblib.load(full_path)
    def get_path(self, mid=None, absolute=True):
        """ Find the path to the model specified by mid """
        import itertools
        if mid is None:
            mid = self.mid
        if mid not in self.values():
            raise ValueError('{} is not a processing step current pipeline,\n {}'.format(mid, self))
        idx = list(self.values()).index(mid)
        valid_keys = list(self.keys())[:idx+1]
        # Interleave step types and model ids: [type1, id1, type2, id2, ...]
        path = list(itertools.chain.from_iterable(
            [[key, self[key]] for key in valid_keys]))
        if absolute:
            del path[0]  # "ediscovery_cache" is already present in cache_dir
            rel_path = os.path.join(*path)
            return os.path.join(self.cache_dir, rel_path)
        else:
            path[0] = 'ediscovery_cache'
            return os.path.join(*path)
| {
"content_hash": "ea4f965935658bc0306b74f06b2aa360",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 101,
"avg_line_length": 33.08585858585859,
"alnum_prop": 0.562051595176309,
"repo_name": "kcompher/FreeDiscovUI",
"id": "92aa91ed8a0c8c7a20d909443ba367892ce2f44a",
"size": "6576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freediscovery/pipeline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "404"
},
{
"name": "Makefile",
"bytes": "598"
},
{
"name": "Nginx",
"bytes": "451"
},
{
"name": "Python",
"bytes": "333007"
},
{
"name": "Shell",
"bytes": "3721"
}
],
"symlink_target": ""
} |
import socket
import sys
import time
from ploxys import main
# Worker ("slave") client: repeatedly polls the master for tasks over TCP,
# runs them through ploxys.main, and reports the result back using a simple
# "ready" / "wait" / "done|task|result" text protocol.
host = input("Type the master's ip: ")
porta = 600  # TCP port the master listens on
ok = True #the ok is again, for avoid excessive printing
while True:
    time.sleep(0.1) #basic time waiting so it wont flood the connection
    while True:
        resultado = 0 #reset data
        final = 0
        resposta = 0
        inputs = []
        try: #lots of error handling and error reporting
            mysock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) # creates the socket
        except socket.error:
            if ok is True:
                print ("Error in creating the socket") # Finishes with error report
            ok = False
            break
        try:
            mysock.connect((host,porta)) # connects to the host
            ok = True
        except socket.error:
            if ok is True:
                print ("Error in connecting with the master")
            ok = False
            break
        try:
            mysock.send(bytes("ready","utf-8")) # sends the ready status to the server
            resposta = mysock.recv(1024).decode("utf-8") #receives the task or the waiting command
            mysock.close()
            if resposta == "wait": #if it must wait, the slave will break the loop and get inside it again
                break
            print ("Got a task: " + resposta) #if it received a task, it will print it
            ok = True
        except:
            e = sys.exc_info()[0]
            if ok is True:
                print ("Error in communicating with master")
                print ("Error %s" % e)
            ok = False
            break
        try:
            # Task format: four comma-separated integers fed to ploxys.main.
            inputs = [int(i) for i in resposta.split(",")] #converts the data to input
            resultado = main(inputs[0],inputs[1],inputs[2],inputs[3]) #inputs the data into the ploxys function
            ok = True
        except:
            e = sys.exc_info()[0]
            if ok is True:
                print ("Error %s" % e)
                print("Error in calculating result, maybe data could not been found")
            ok = False
            break
        try:
            final = "done|" + resposta + "|" + str(resultado) #formats the resulting data as the protocol demands
            print (resultado)
            mysock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) #sets the connections, connects and sends the data
            mysock.connect((host,porta))
            mysock.send(final.encode("utf-8"))
            ok = True
        except:
            if ok is True:
                print ("Error in answering the master")
            ok = False
            break
    # NOTE(review): if the very first socket.socket() call failed, ``mysock``
    # was never assigned and this line raises NameError -- confirm intended.
    mysock.close() #closes the connections
# NOTE(review): unreachable -- the outer ``while True`` loop never exits.
saida = input("Type enter to exit")
"content_hash": "6ef14db05aee36b7d8aa4790b832cc2e",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 120,
"avg_line_length": 38.885714285714286,
"alnum_prop": 0.5470242468772961,
"repo_name": "victor-cortez/Heimdall",
"id": "45d2aa60ffc52d9e1c83e8868ee310963a5230aa",
"size": "2722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Heimdall_slave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87510"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class OffsetValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``offset`` property of ``funnel`` traces."""

    def __init__(self, plotly_name="offset", parent_name="funnel", **kwargs):
        # Pull the defaults out first so explicit caller overrides win.
        array_ok = kwargs.pop("array_ok", False)
        edit_type = kwargs.pop("edit_type", "calc")
        super(OffsetValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "ca1547924aa1bd34bf9ea89055f97529",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 77,
"avg_line_length": 37.083333333333336,
"alnum_prop": 0.6089887640449438,
"repo_name": "plotly/plotly.py",
"id": "32304632b4b60526f56f262c30b2d406973f498c",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/_offset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import datetime

# Sphinx configuration for the django-controlcenter documentation.
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'django-controlcenter'
# Keep the copyright year current automatically.
copyright = ('{}, Django-controlcenter developers and contributors'
             .format(datetime.date.today().year))
version = '0.2.9'
release = '0.2.9'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
htmlhelp_basename = 'django-controlcenterdoc'
# LaTeX / man page / Texinfo output targets.
latex_elements = {}
latex_documents = [
    (master_doc, 'django-controlcenter.tex',
     'Django-controlcenter Documentation', 'Murad Byashimov', 'manual'),
]
man_pages = [
    (master_doc, 'django-controlcenter', 'Django-controlcenter Documentation',
     ['Django-controlcenter developers and contributors'], 1)
]
texinfo_documents = [
    (master_doc, 'django-controlcenter', 'Django-controlcenter Documentation',
     'Django-controlcenter developers and contributors',
     'django-controlcenter', 'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "49022ed777c0f4bb8b7e1b8528d6d3a0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 32.86666666666667,
"alnum_prop": 0.7058823529411765,
"repo_name": "byashimov/django-controlcenter",
"id": "4c242f33850e825e19c719547e4a57695ffc9b74",
"size": "1110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16706"
},
{
"name": "HTML",
"bytes": "11165"
},
{
"name": "JavaScript",
"bytes": "2259"
},
{
"name": "Python",
"bytes": "57852"
},
{
"name": "Stylus",
"bytes": "9321"
}
],
"symlink_target": ""
} |
import os
import urllib2, json
from urlparse import urlparse
def ParseURL(agsURL):
ags = []
print agsURL
ags = urlparse(agsURL)
return ags
def GetFolders(agsURL):
    """Print the folder names advertised by an ArcGIS Server REST endpoint.

    NOTE(review): assumes the URL already requests JSON output (``?f=json``)
    so the response body parses as JSON -- confirm with callers.
    """
    f = urllib2.urlopen(agsURL)
    j = json.loads(f.read())
    for item in j["folders"]:
        print item
def MapServiceQuery(agsURL):
    """Print the name of every layer reported by a map service endpoint.

    NOTE(review): like GetFolders, assumes a JSON-format response -- confirm.
    """
    f = urllib2.urlopen(agsURL)
    #print f.read()
    j = json.loads(f.read())
    for item in j["layers"]:
        print item["name"]
"content_hash": "149b628f07c15d7b5feb71243731bf4d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 31,
"avg_line_length": 21.454545454545453,
"alnum_prop": 0.6101694915254238,
"repo_name": "ACueva/Avi-Playground",
"id": "4913c3ea285b469820f3898e3feff4274634fe9e",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VerifyServer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8074"
}
],
"symlink_target": ""
} |
"""
Landing page controller var miscellaneous pages.
"""
from flask import render_template
def create_landing_page_routes(app):
    """Attach the landing-page URL rules to the given Flask *app*."""

    def root():
        return 'Hello from BeamIt!!'

    def android_download():
        return render_template('androidAppDownload.html')

    # Explicit registration is equivalent to applying @app.route decorators.
    app.route('/')(root)
    app.route('/android/download')(android_download)
| {
"content_hash": "7e8c48e36ab21d0957e41f0c091279d9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 57,
"avg_line_length": 21.8,
"alnum_prop": 0.6697247706422018,
"repo_name": "ksweta/BeamIt-Server",
"id": "626b91cc39a74d60acc87ea13e826385a9e5e77d",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "beamit/controllers/landing_page.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "148"
},
{
"name": "Python",
"bytes": "32100"
}
],
"symlink_target": ""
} |
from .dr7 import DR7
from .dr8 import DR8
from .dr9 import DR9
from .dr10 import DR10
from .common import band_name, band_index, band_names, cas_flags
from .common import photo_flags1_info, photo_flags2_info, photo_flags1_map, photo_flags2_map
from .common import munu_to_radec_deg, AsTransWrapper, AsTrans
from .fields import *
| {
"content_hash": "20275f241e9cfcb47813febc81c61b2c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 92,
"avg_line_length": 33.1,
"alnum_prop": 0.7764350453172205,
"repo_name": "olebole/astrometry.net",
"id": "2cc4f27eaf2b7583c39d5a01afce2ff8bafc66b3",
"size": "440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdss/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "5709"
},
{
"name": "C",
"bytes": "5253753"
},
{
"name": "C++",
"bytes": "133792"
},
{
"name": "CSS",
"bytes": "13138"
},
{
"name": "Gnuplot",
"bytes": "482"
},
{
"name": "HTML",
"bytes": "104582"
},
{
"name": "JavaScript",
"bytes": "29452"
},
{
"name": "Makefile",
"bytes": "78759"
},
{
"name": "Objective-C",
"bytes": "15247"
},
{
"name": "PHP",
"bytes": "1189"
},
{
"name": "PLSQL",
"bytes": "71"
},
{
"name": "Perl",
"bytes": "837"
},
{
"name": "Python",
"bytes": "1065731"
},
{
"name": "Shell",
"bytes": "22113"
}
],
"symlink_target": ""
} |
from warnings import warn
from avocado.core import loader
class BaseOperatorMetaclass(type):
    """Metaclass that derives each operator class's ``uid`` from its
    ``lookup`` attribute, prefixed with '-' when ``negated`` is True."""
    def __new__(cls, name, bases, attrs):
        new_cls = type.__new__(cls, name, bases, attrs)
        # e.g. lookup='exact', negated=True  ->  uid='-exact'
        new_cls.uid = ('-' if new_cls.negated else '') + new_cls.lookup
        return new_cls
class BaseOperator(object):
    """Abstract base class for query lookup operators.

    Subclasses set ``lookup`` (the Django-style lookup suffix),
    ``short_name``/``verbose_name`` (display strings) and ``negated``;
    the metaclass derives ``uid`` from ``lookup`` and ``negated``.
    (Python 2 module: uses ``__metaclass__`` and ``unicode``.)
    """
    __metaclass__ = BaseOperatorMetaclass
    lookup = ''
    short_name = ''
    verbose_name = ''
    negated = False

    def __unicode__(self):
        return u'{0} ({1})'.format(self.verbose_name, self.uid)

    def __repr__(self):
        return u'<Operator: "{0}" ({1})>'.format(self.verbose_name, self.uid)

    @property
    def operator(self):
        # Deprecated alias kept for backwards compatibility with old callers.
        warn('self.operator is deprecated, use self.lookup instead',
             DeprecationWarning)
        return self.lookup

    def coerce_to_unicode(self, value):
        return unicode(value)

    def is_valid(self, value):
        """Return True when *value* is an acceptable operand for this operator."""
        # Fixed: ``raise NotImplemented(...)`` raised a TypeError, because
        # NotImplemented is a sentinel value and is not callable; the
        # intended exception is NotImplementedError.
        raise NotImplementedError('Use an Operator subclass')

    def text(self, value):
        """Return a human-readable rendering of this condition."""
        raise NotImplementedError('Use an Operator subclass')
class SimpleTypeOperator(BaseOperator):
    "Operator class for non-container type values. Includes strings."
    def is_valid(self, value):
        # Anything without __iter__ is treated as a scalar (in Python 2,
        # strings have no __iter__, so they pass this check as well).
        return not hasattr(value, '__iter__')
    def text(self, value):
        return u'{0} {1}'.format(self.verbose_name,
                                 self.coerce_to_unicode(value)) if False else self._text_impl(value)
class StringOperator(SimpleTypeOperator):
    "Operator class for string-only lookups."
    def is_valid(self, value):
        # Accepts both str and unicode (basestring is Python 2 only).
        return isinstance(value, basestring)
class ContainerTypeOperator(BaseOperator):
    "Operator class for container type values. Excludes strings."
    # Conjunction placed before the final item ("a, b and c").
    join_string = 'and'
    # Number of items shown before eliding the remainder as "(N more)".
    max_list_size = 3
    def is_valid(self, value):
        return hasattr(value, '__iter__')
    def text(self, value):
        value = map(self.coerce_to_unicode, value)
        length = len(value)
        # A one-element container degenerates to a plain (not-)equals phrase.
        if length == 1:
            if self.negated:
                name = NotExact.verbose_name
            else:
                name = Exact.verbose_name
            return u'{0} {1}'.format(name, value[0])
        # Keep the final element aside so it can follow the join word.
        last = value.pop()
        length -= 1
        if length > self.max_list_size:
            head = value[:self.max_list_size]
        else:
            head = value
        text = '{0} {1}'.format(self.verbose_name, ', '.join(head))
        # Add the leftover item count for the tail of the list
        tail = length - self.max_list_size
        if tail > 0:
            text += u' ... ({0} more)'.format(tail)
        return u'{0} {1} {2}'.format(text, self.join_string, last)
class Null(BaseOperator):
    # The isnull lookup takes a boolean operand: True means "is null".
    lookup = 'isnull'
    short_name = 'is null'
    verbose_name = 'is null'
    def is_valid(self, value):
        return isinstance(value, bool)
    def text(self, value):
        "Do not return value"
        # Render based on the boolean operand rather than echoing it.
        return unicode(self.verbose_name if value else NotNull.verbose_name)
class NotNull(Null):
    # Negation of Null; uid becomes '-isnull' via the metaclass.
    short_name = 'not null'
    verbose_name = 'is not null'
    negated = True
    def text(self, value):
        "Do not return value"
        return unicode(self.verbose_name if value else Null.verbose_name)
class Exact(SimpleTypeOperator):
    # Case-sensitive equality.
    lookup = 'exact'
    short_name = '='
    verbose_name = 'is'
class NotExact(Exact):
    # Negated equality; uid becomes '-exact'.
    short_name = '!='
    verbose_name = 'is not'
    negated = True
    def text(self, value):
        # Easier to read 'is False', rather than 'is not True'
        if isinstance(value, bool):
            return u'is {0}'.format(not value)
        return super(NotExact, self).text(value)
# String-specific lookups
class InsensitiveExact(StringOperator):
    # Case-insensitive equality.
    lookup = 'iexact'
    short_name = '='
    verbose_name = 'is'
class InsensitiveNotExact(InsensitiveExact):
    short_name = '!='
    verbose_name = 'is not'
    negated = True
class Contains(StringOperator):
    # Case-sensitive substring match.
    lookup = 'contains'
    short_name = 'contains'
    verbose_name = 'contains the text'
class InsensitiveContains(Contains):
    # Same as Contains but case-insensitive.
    lookup = 'icontains'
class NotContains(Contains):
    short_name = 'does not contain'
    verbose_name = 'does not contain the text'
    negated = True
class NotInsensitiveContains(InsensitiveContains):
    short_name = 'does not contain'
    verbose_name = 'does not contain the text'
    negated = True
class Regex(StringOperator):
    # Case-sensitive regular-expression match.
    lookup = 'regex'
    short_name = '=~'
    verbose_name = 'matches'
class InsensitiveRegex(Regex):
    # Case-insensitive variant.
    lookup = 'iregex'
class NotRegex(Regex):
    """Negated case-sensitive regular-expression match."""
    short_name = '!=~'
    # Fixed typo: this attribute was misspelled ``versbose_name``, so the
    # class silently inherited Regex's 'matches' display string instead of
    # the negated phrase (compare NotInsensitiveRegex, which is correct).
    verbose_name = 'does not match'
    negated = True
class NotInsensitiveRegex(InsensitiveRegex):
    # Negated case-insensitive regex match.
    short_name = '!=~'
    verbose_name = 'does not match'
    negated = True
# Numerical and lexicographical lookups
class LessThan(SimpleTypeOperator):
    lookup = 'lt'
    short_name = '<'
    verbose_name = 'is less than'
class GreaterThan(SimpleTypeOperator):
    lookup = 'gt'
    short_name = '>'
    verbose_name = 'is greater than'
class LessThanOrEqual(SimpleTypeOperator):
    lookup = 'lte'
    short_name = '<='
    verbose_name = 'is less than or equal to'
class GreaterThanOrEqual(SimpleTypeOperator):
    lookup = 'gte'
    short_name = '>='
    verbose_name = 'is greater than or equal to'
# Operators for container types (excluding strings)
class InList(ContainerTypeOperator):
    # Matches when the value equals any member of the given list.
    lookup = 'in'
    join_string = 'or'
    short_name = 'includes'
    verbose_name = 'is either'
class NotInList(InList):
    join_string = 'nor'
    short_name = 'excludes'
    verbose_name = 'is neither'
    negated = True
class Range(ContainerTypeOperator):
    # Two-endpoint range lookup.
    join_string = 'and'
    lookup = 'range'
    short_name = 'between'
    verbose_name = 'is between'
    def is_valid(self, value):
        # A range requires exactly two endpoints.
        return super(Range, self).is_valid(value) and len(value) == 2
    def text(self, value):
        value = map(self.coerce_to_unicode, value)
        return u'{0} {1}'.format(self.verbose_name, ' and '.join(value))
class NotRange(Range):
    short_name = 'not between'
    verbose_name = 'is not between'
    negated = True
# Register operators
registry = loader.Registry()

# Registration order mirrors the historical listing: general equality,
# string operators, null checks, comparisons, then list/range operators.
_OPERATOR_CLASSES = (
    Exact, NotExact,
    InsensitiveExact, Contains, InsensitiveContains, InsensitiveNotExact,
    NotContains, NotInsensitiveContains, Regex, NotRegex,
    InsensitiveRegex, NotInsensitiveRegex,
    Null, NotNull,
    LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual,
    InList, NotInList,
    Range, NotRange,
)
for _operator_cls in _OPERATOR_CLASSES:
    registry.register(_operator_cls, _operator_cls.uid)

loader.autodiscover('operators')
| {
"content_hash": "6e890e48d4cf8956906cdb1e102a539e",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 77,
"avg_line_length": 25.110344827586207,
"alnum_prop": 0.6606701455644054,
"repo_name": "murphyke/avocado",
"id": "4b38e5bfa61ccab2e86418e0f22379fd77dca983",
"size": "7282",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "avocado/query/operators.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "18009"
},
{
"name": "Makefile",
"bytes": "84"
},
{
"name": "Python",
"bytes": "1035156"
},
{
"name": "R",
"bytes": "273"
},
{
"name": "SAS",
"bytes": "689"
},
{
"name": "Shell",
"bytes": "2369"
}
],
"symlink_target": ""
} |
from neutron_lib.api import converters
from neutron_lib import constants
from neutron_lib.db import constants as db_const
# API definition constants for the (core) subnet pool resource.
NAME = 'Neutron L3 Subnet Pool'
ALIAS = 'subnetpool'
DESCRIPTION = "Layer 3 subnet pool abstraction"
UPDATED_TIMESTAMP = "2012-01-01T10:00:00-00:00"
RESOURCE_NAME = 'subnetpool'
COLLECTION_NAME = 'subnetpools'
# Attribute map consumed by the API layer: each entry declares whether the
# attribute may appear in POST/PUT bodies, how it is validated/converted,
# its default, and whether it is visible/filterable/sortable.
RESOURCE_ATTRIBUTE_MAP = {
    COLLECTION_NAME: {
        'id': {'allow_post': False,
               'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'is_filter': True,
               'is_sort_key': True,
               'primary_key': True},
        'name': {'allow_post': True,
                 'allow_put': True,
                 'validate': {'type:not_empty_string': None},
                 'is_filter': True,
                 'is_sort_key': True,
                 'is_visible': True},
        'tenant_id': {'allow_post': True,
                      'allow_put': False,
                      'validate': {
                          'type:string': db_const.PROJECT_ID_FIELD_SIZE},
                      'required_by_policy': True,
                      'is_filter': True,
                      'is_sort_key': True,
                      'is_visible': True},
        'prefixes': {'allow_post': True,
                     'allow_put': True,
                     'validate': {'type:subnet_list': None},
                     'is_visible': True},
        'default_quota': {'allow_post': True,
                          'allow_put': True,
                          'validate': {'type:non_negative': None},
                          'convert_to': converters.convert_to_int,
                          'default': constants.ATTR_NOT_SPECIFIED,
                          'is_filter': True,
                          'is_sort_key': True,
                          'is_visible': True},
        'ip_version': {'allow_post': False,
                       'allow_put': False,
                       'is_filter': True,
                       'is_sort_key': True,
                       'is_visible': True},
        'default_prefixlen': {'allow_post': True,
                              'allow_put': True,
                              'validate': {'type:non_negative': None},
                              'convert_to': converters.convert_to_int,
                              'default': constants.ATTR_NOT_SPECIFIED,
                              'is_filter': True,
                              'is_sort_key': True,
                              'is_visible': True},
        'min_prefixlen': {'allow_post': True,
                          'allow_put': True,
                          'default': constants.ATTR_NOT_SPECIFIED,
                          'validate': {'type:non_negative': None},
                          'convert_to': converters.convert_to_int,
                          'is_filter': True,
                          'is_sort_key': True,
                          'is_visible': True},
        'max_prefixlen': {'allow_post': True,
                          'allow_put': True,
                          'default': constants.ATTR_NOT_SPECIFIED,
                          'validate': {'type:non_negative': None},
                          'convert_to': converters.convert_to_int,
                          'is_filter': True,
                          'is_sort_key': True,
                          'is_visible': True},
        'is_default': {'allow_post': True,
                       'allow_put': True,
                       'default': False,
                       'convert_to': converters.convert_to_boolean,
                       'is_visible': True,
                       'is_filter': True,
                       'is_sort_key': True,
                       'required_by_policy': True,
                       'enforce_policy': True},
        constants.SHARED: {
            'allow_post': True,
            'allow_put': False,
            'default': False,
            'convert_to': converters.convert_to_boolean,
            'is_visible': True,
            'is_filter': True,
            'is_sort_key': True,
            'required_by_policy': True,
            'enforce_policy': True
        }
    }
}
# This is a core resource so the following are not applicable.
IS_SHIM_EXTENSION = False
IS_STANDARD_ATTR_EXTENSION = False
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
REQUIRED_EXTENSIONS = []
OPTIONAL_EXTENSIONS = []
ACTION_STATUS = {}
| {
"content_hash": "8cdcb9fbd6eff8941523432bf6c964c2",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 73,
"avg_line_length": 40.87155963302752,
"alnum_prop": 0.43389450056116724,
"repo_name": "openstack/neutron-lib",
"id": "136f4423294341bd0634a3c436c1212254fde820",
"size": "5028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron_lib/api/definitions/subnetpool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3459"
},
{
"name": "HTML",
"bytes": "31248"
},
{
"name": "Python",
"bytes": "1522390"
},
{
"name": "Shell",
"bytes": "6100"
}
],
"symlink_target": ""
} |
import imp
import os
from contextlib import redirect_stderr, redirect_stdout
from optparse import OptionParser
import lldb
def __lldb_init_module(debugger, dict):
    """Entry point invoked by LLDB when this module is imported; loads all
    command scripts from the sibling ``commands`` directory."""
    filePath = os.path.realpath(__file__)
    lldbHelperDir = os.path.dirname(filePath)
    commandsDirectory = os.path.join(lldbHelperDir, "commands")
    loadCommandsInDirectory(commandsDirectory)
def loadCommandsInDirectory(commandsDirectory):
    """Import every ``.py`` file in *commandsDirectory* and register the
    commands it declares.

    Modules may provide ``lldbinit()`` (called once on load) and/or
    ``lldbcommands()`` (returning command objects to register).

    NOTE(review): uses the deprecated ``imp`` module (removed in Python
    3.12); migrating to ``importlib`` needs verification against the
    interpreter embedded in the supported LLDB versions.
    """
    for file in os.listdir(commandsDirectory):
        fileName, fileExtension = os.path.splitext(file)
        if fileExtension == ".py":
            module = imp.load_source(fileName, os.path.join(commandsDirectory, file))
            if hasattr(module, "lldbinit"):
                module.lldbinit()
            if hasattr(module, "lldbcommands"):
                # Stash registered wrappers on the module (see loadCommand).
                module._loadedFunctions = {}
                for command in module.lldbcommands():
                    loadCommand(
                        module, command, commandsDirectory, fileName, fileExtension
                    )
def loadCommand(module, command, directory, filename, extension):
    """Register one command object with LLDB.

    Builds the runnable wrapper, keeps a reference on *module* so it is
    not garbage collected, exposes it inside LLDB's script interpreter,
    and finally adds the user-facing LLDB command.
    """
    func = makeRunCommand(command, os.path.join(directory, filename + extension))
    name = command.name()
    helpText = (
        command.description().strip().splitlines()[0]
    )  # first line of description
    key = filename + "_" + name
    module._loadedFunctions[key] = func
    functionName = "__" + key
    # Bind the wrapper to a module-level name in the script interpreter...
    lldb.debugger.HandleCommand(
        "script "
        + functionName
        + " = sys.modules['"
        + module.__name__
        + "']._loadedFunctions['"
        + key
        + "']"
    )
    # ...and register it as an actual LLDB command backed by that name.
    lldb.debugger.HandleCommand(
        'command script add --help "{help}" --function {function} {name}'.format(
            help=helpText.replace('"', '\\"'),  # escape quotes
            function=functionName,
            name=name,
        )
    )
def makeRunCommand(command, filename):
    """Wrap *command* in a function with the signature LLDB expects of a
    script-backed command; options/args are parsed and user output is
    redirected into LLDB's result object."""
    def runCommand(debugger, input, exe_ctx, result, _):
        # lldb assumes that any output meant for the user is written
        # to the result object. By redirecting stdout here, we can
        # use methods like print (or parse_args) in the command logic
        # as if they are writing to stdout, but write to result
        # instead. lldb will handle displaying it to the user.
        with redirect_stdout(result), redirect_stderr(result):
            command.result = result
            command.context = exe_ctx
            splitInput = command.lex(input)
            # OptionParser will throw in the case where you want just one
            # big long argument and no options and you enter something
            # that starts with '-' in the argument. e.g.:
            # somecommand -[SomeClass someSelector:]
            # This solves that problem by prepending a '--' so that
            # OptionParser does the right thing.
            options = command.options()
            if len(options) == 0:
                if "--" not in splitInput:
                    splitInput.insert(0, "--")
            parser = optionParserForCommand(command)
            (options, args) = parser.parse_args(splitInput)
            # When there are more args than the command has declared, assume
            # the initial args form an expression and combine them into a single arg.
            if len(args) > len(command.args()):
                overhead = len(args) - len(command.args())
                head = args[: overhead + 1]  # Take N+1 and reduce to 1.
                args = [" ".join(head)] + args[-overhead:]
            if validateArgsForCommand(args, command):
                command.run(args, options)
    # Surface the command's help text through the wrapper's docstring.
    runCommand.__doc__ = helpForCommand(command, filename)
    return runCommand
def validateArgsForCommand(args, command):
    """Ensure *args* satisfies *command*'s declared positional arguments.

    Missing trailing arguments are filled in from their declared defaults
    (mutating *args* in place). If a required argument has no default, an
    error plus usage text is printed and False is returned; otherwise True.
    """
    if len(args) < len(command.args()):
        defaultArgs = [arg.default for arg in command.args()]
        defaultArgsToAppend = defaultArgs[len(args) :]
        index = len(args)
        for defaultArg in defaultArgsToAppend:
            if not defaultArg:
                arg = command.args()[index]
                print("Whoops! You are missing the <" + arg.argName + "> argument.")
                print("\nUsage: " + usageForCommand(command))
                # Fixed: previously fell through with a bare ``return``
                # (implicit None); return an explicit False for clarity.
                return False
            index += 1
        args.extend(defaultArgsToAppend)
    return True
def optionParserForCommand(command):
    """Build an OptionParser covering *command*'s declared options.

    Boolean options become store_true/store_false flags (inverted when the
    default is truthy); value options carry their declared default.
    """
    parser = OptionParser()
    for opt in command.options():
        common = dict(dest=opt.argName, help=opt.help)
        if opt.boolean:
            flag_action = "store_false" if opt.default else "store_true"
            parser.add_option(opt.shortName, opt.longName,
                              action=flag_action, **common)
        else:
            parser.add_option(opt.shortName, opt.longName,
                              default=opt.default, **common)
    return parser
def helpForCommand(command, filename):
    """Build the multi-line help text for *command*: description, argument
    and option listings, a syntax line, and the implementing class/file."""
    help = command.description()
    argSyntax = ""
    optionSyntax = ""
    if command.args():
        help += "\n\nArguments:"
        for arg in command.args():
            help += "\n  <" + arg.argName + ">; "
            if arg.argType:
                help += "Type: " + arg.argType + "; "
            help += arg.help
            argSyntax += " <" + arg.argName + ">"
    if command.options():
        help += "\n\nOptions:"
        for option in command.options():
            # Show whichever flag spellings the option declares.
            if option.longName and option.shortName:
                optionFlag = option.longName + "/" + option.shortName
            elif option.longName:
                optionFlag = option.longName
            else:
                optionFlag = option.shortName
            help += "\n  " + optionFlag + " "
            if not option.boolean:
                help += "<" + option.argName + ">; Type: " + option.argType
            help += "; " + option.help
            optionSyntax += " [{name}{arg}]".format(
                name=(option.longName or option.shortName),
                arg=("" if option.boolean else ("=" + option.argName)),
            )
    help += "\n\nSyntax: " + command.name() + optionSyntax + argSyntax
    help += "\n\nThis command is implemented as %s in %s." % (
        command.__class__.__name__,
        filename,
    )
    return help
def usageForCommand(command):
    """Return a one-line usage string; arguments with defaults are bracketed."""
    pieces = [command.name()]
    for arg in command.args():
        if arg.default:
            pieces.append("[" + arg.argName + "]")
        else:
            pieces.append(arg.argName)
    return " ".join(pieces)
| {
"content_hash": "6117e7ef4f2a5edad992c9c3e454cadf",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 85,
"avg_line_length": 32.50246305418719,
"alnum_prop": 0.5621400424371021,
"repo_name": "facebook/chisel",
"id": "585527b3f03ea5e425b97d7fe7f1fec467b7c42e",
"size": "6815",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fbchisellldb.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1962"
},
{
"name": "C++",
"bytes": "1876"
},
{
"name": "Makefile",
"bytes": "321"
},
{
"name": "Objective-C",
"bytes": "5139"
},
{
"name": "Objective-C++",
"bytes": "11455"
},
{
"name": "Python",
"bytes": "208307"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function
import os
import sys
import argparse
import redmapper
if __name__ == '__main__':
    # Command-line driver: compute weighted random/area files for one or more
    # richness (lambda) cuts using the redmapper RandomWeigher.
    parser = argparse.ArgumentParser(description='Compute random weights')

    parser.add_argument('-c', '--configfile', action='store', type=str, required=True,
                        help='YAML config file')
    parser.add_argument('-r', '--randfile', action='store', type=str, required=True,
                        help='Random file to compute weights')
    parser.add_argument('-l', '--lambda_cuts', action='store', type=float, nargs='+',
                        required=True, help='Minimum richness')

    args = parser.parse_args()

    config = redmapper.Configuration(args.configfile)
    weigher = redmapper.RandomWeigher(config, args.randfile)
    # One weighted random file and one weighted area file per richness cut.
    for lambda_cut in args.lambda_cuts:
        wt_randfile, wt_areafile = weigher.weight_randoms(lambda_cut)
        print("Made weighted random file %s" % (wt_randfile))
        print("Made weighted area file %s" % (wt_areafile))
| {
"content_hash": "e5547e9382e2afd443b2d942b4791ccd",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 86,
"avg_line_length": 39.96153846153846,
"alnum_prop": 0.6458132820019249,
"repo_name": "erykoff/redmapper",
"id": "b0e70b739f48b0ae2956751f14449a87bc139e8b",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bin/redmapper_weight_randoms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35922"
},
{
"name": "Dockerfile",
"bytes": "1872"
},
{
"name": "Python",
"bytes": "971787"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; do not hand-edit the operations.
    # NOTE: the datetime defaults below were captured at generation time
    # (2015-05-05) and are fixed values, not evaluated per row.

    dependencies = [
        ('historias', '0016_auto_20150429_2315'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historias',
            name='fecha_ingreso',
            field=models.DateField(default=datetime.datetime(2015, 5, 5, 0, 54, 34, 518729), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='historias',
            name='hora_ingreso',
            field=models.TimeField(default=datetime.datetime(2015, 5, 5, 0, 54, 34, 518673), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "3cc94241ecf23f5c35e36dc3b29b52b8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 159,
"avg_line_length": 32.69230769230769,
"alnum_prop": 0.6105882352941177,
"repo_name": "btenaglia/hpc-historias-clinicas",
"id": "75a8adbd51a0a6a5922e61154522081fd7812af9",
"size": "874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hpc-historias-clinicas/historias/migrations/0017_auto_20150505_0054.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "231102"
},
{
"name": "HTML",
"bytes": "148185"
},
{
"name": "JavaScript",
"bytes": "570412"
},
{
"name": "Python",
"bytes": "243694"
}
],
"symlink_target": ""
} |
"""Your job is to write a speech for your CEO. You have a list of meaningful
phrases that he is fond of such as "knowledge optimization initiatives" and
your task is to weave them in to a speech.
You also have the opening words "Our clear strategic direction is to invoke..."
and some useful joining phrases such as "whilst not forgetting".
The speech that you will write takes the opening words and randomly jumbles the
phrases alternated with joining phrases to make a more complete, if meaningless,
speech. After execution the speech might need some light editing.
Note to my current employer: This is no reflection whatsoever on the
organisation that I work for. This all comes from one of my former CEOs,
Stephen Elop, during his tenure at Nokia. The code is mine but the words are
all his, slightly transliterated ("<big corp.>" replaces "Nokia",
"<little corp.>" replaces "Symbian" and "<other corp.>" replaces "Microsoft").
Created on 19 Feb 2016
@author: paulross
"""
import random
import textwrap
# Fixed words that every generated speech opens with.
OPENING_WORDS = ['Our', 'clear', 'strategic', 'direction', 'is', 'to', 'invoke',]

# Each row is a (opener, middle, closer) triple of buzzwords. get_phrase()
# picks one entry at random from each *column* to assemble a phrase.
PHRASE_TABLE = (
    ("accountable", "transition", "leadership"),
    ("driving", "strategy", "implementation"),
    ("drilling down into", "active", "core business objectives"),
    ("next billion", "execution", "with our friends in <other corp.>"),
    ("creating", "next-generation", "franchise platform"),
    ("<big corp.>'s", "volume and", "value leadership"),
    ("significant", "end-user", "experience"),
    ("transition", "from <small corp.>", "to <other corp.>'s platform"),
    ("integrating", "shared", "services"),
    ("empowered to", "improve and expand", "our portfolio of experience"),
    ("deliver", "new", "innovation"),
    ("ramping up", "diverse", "collaboration"),
    ("next generation", "mobile", "ecosystem"),
    ("focus on", "growth and", "consumer delight"),
    ("management", "planning", "interlocks"),
    ("necessary", "operative", "capabilities"),
    ("knowledge", "optimization", "initiatives"),
    ("modular", "integration", "environment"),
    ("software", "creation", "processes"),
    ("agile", "working", "practices"),
)

# Connective fragments inserted between phrases; entries starting with '. '
# terminate the current sentence and begin a new one.
INSERTS = ('for', 'with', 'and', 'as well as', 'by',
           'whilst not forgetting',
           '. Of course',
           '. To be absolutely clear',
           '. We need',
           'and unrelenting',
           'with unstoppable',
           )
def get_phrase():
    """Return a phrase by choosing words at random from each column of the PHRASE_TABLE.

    zip(*PHRASE_TABLE) transposes the table so each element is one column
    (a tuple of alternatives); one alternative is drawn per column and the
    choices are joined with single spaces.
    """
    # Previously an unimplemented stub (returned None).
    return ' '.join(random.choice(column) for column in zip(*PHRASE_TABLE))
def get_insert():
    """Return a randomly chosen set of words to insert between phrases."""
    # Previously an unimplemented stub (returned None).
    return random.choice(INSERTS)
def write_speech(n):
    """Write a speech with the opening words followed by n random phrases
    interspersed with random inserts.

    The assembled speech is printed to stdout, wrapped to 78 columns.
    It may need light editing afterwards (e.g. ". of course" mid-line).
    """
    # Previously an unimplemented stub.
    words = list(OPENING_WORDS)
    for i in range(n):
        if i:
            # Joining phrase between consecutive buzzword phrases.
            words.append(get_insert())
        words.append(get_phrase())
    speech = ' '.join(words)
    print('\n'.join(textwrap.wrap(speech, width=78)))


if __name__ == '__main__':
    write_speech(40)
| {
"content_hash": "8b1c4558d337d1d03e0a8e23c5ccf1f1",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 91,
"avg_line_length": 43.31578947368421,
"alnum_prop": 0.5826245443499393,
"repo_name": "manahl/PythonTrainingExercises",
"id": "99dd4c8e155520dc44438fa121b6d252ee6717ee",
"size": "3292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Beginners/stdlib/theCEOspeech/problem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "285088"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import math
import re
import django
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.db.models.fields import FieldDoesNotExist
from django.forms.models import fields_for_model
from django.template.loader import render_to_string
from django.utils.functional import curry
from django.utils.safestring import mark_safe
from django.utils.six import text_type
from django.utils.translation import ugettext_lazy
from taggit.managers import TaggableManager
from wagtail.utils.decorators import cached_classmethod
from wagtail.wagtailadmin import compare, widgets
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.utils import camelcase_to_underscore, resolve_model_string
# DIRECT_FORM_FIELD_OVERRIDES, FORM_FIELD_OVERRIDES are imported for backwards
# compatibility, as people are likely importing them from here and then
# appending their own overrides
from .forms import ( # NOQA
DIRECT_FORM_FIELD_OVERRIDES, FORM_FIELD_OVERRIDES, WagtailAdminModelForm, WagtailAdminPageForm,
formfield_for_dbfield)
def widget_with_script(widget, script):
    """Concatenate a rendered widget with its initialiser <script>, marked safe
    so the template engine does not re-escape the markup."""
    markup = '%s<script>%s</script>' % (widget, script)
    return mark_safe(markup)
def get_form_for_model(
    model, form_class=WagtailAdminModelForm,
    fields=None, exclude=None, formsets=None, exclude_formsets=None, widgets=None
):
    """Construct a ModelForm subclass for ``model`` with the given Meta options.

    Behaves like Django's ``modelform_factory`` but additionally understands
    the ``formsets`` / ``exclude_formsets`` Meta options (used by ClusterForm)
    and inherits any existing ``Meta`` declared on ``form_class``.
    """
    # django's modelform_factory with a bit of custom behaviour
    attrs = {'model': model}
    # Only set the Meta options that were explicitly supplied, so defaults
    # inherited from form_class.Meta are not clobbered with None.
    if fields is not None:
        attrs['fields'] = fields
    if exclude is not None:
        attrs['exclude'] = exclude
    if widgets is not None:
        attrs['widgets'] = widgets
    if formsets is not None:
        attrs['formsets'] = formsets
    if exclude_formsets is not None:
        attrs['exclude_formsets'] = exclude_formsets

    # Give this new form class a reasonable name.
    class_name = model.__name__ + str('Form')
    bases = (object,)
    if hasattr(form_class, 'Meta'):
        # Chain the new Meta onto the existing one so its options are kept.
        bases = (form_class.Meta,) + bases

    form_class_attrs = {
        'Meta': type(str('Meta'), bases, attrs)
    }

    # Instantiate via form_class's own metaclass (ModelFormMetaclass) so the
    # usual ModelForm machinery builds the field definitions.
    metaclass = type(form_class)
    return metaclass(class_name, (form_class,), form_class_attrs)
def extract_panel_definitions_from_model_class(model, exclude=None):
    """Derive a list of panel definitions for ``model``.

    An explicit ``panels`` attribute on the model takes precedence; otherwise
    one panel is created per auto-generated form field, using the widget's
    preferred panel class where it declares one (falling back to FieldPanel).
    """
    if hasattr(model, 'panels'):
        return model.panels

    excluded_fields = list(exclude) if exclude else []

    form_fields = fields_for_model(
        model, exclude=excluded_fields, formfield_callback=formfield_for_dbfield)

    panels = []
    for field_name, form_field in form_fields.items():
        try:
            panel_class = form_field.widget.get_panel()
        except AttributeError:
            # Widget declares no preferred panel; use the generic one.
            panel_class = FieldPanel
        panels.append(panel_class(field_name))

    return panels
class EditHandler(object):
    """
    Abstract class providing sensible default behaviours for objects implementing
    the EditHandler API
    """
    # return list of widget overrides that this EditHandler wants to be in place
    # on the form it receives
    @classmethod
    def widget_overrides(cls):
        return {}

    # return list of fields that this EditHandler expects to find on the form
    @classmethod
    def required_fields(cls):
        return []

    # return a dict of formsets that this EditHandler requires to be present
    # as children of the ClusterForm; the dict is a mapping from relation name
    # to parameters to be passed as part of get_form_for_model's 'formsets' kwarg
    @classmethod
    def required_formsets(cls):
        return {}

    # return any HTML that needs to be output on the edit page once per edit handler definition.
    # Typically this will be used to define snippets of HTML within <script type="text/x-template"></script> blocks
    # for Javascript code to work with.
    @classmethod
    def html_declarations(cls):
        return ''

    def __init__(self, instance=None, form=None):
        # Both arguments are effectively mandatory; the keyword defaults only
        # exist so callers can pass them by name.
        if not instance:
            raise ValueError("EditHandler did not receive an instance object")
        self.instance = instance

        if not form:
            raise ValueError("EditHandler did not receive a form object")
        self.form = form

    # Heading / help text to display to the user
    heading = ""
    help_text = ""

    def classes(self):
        """
        Additional CSS classnames to add to whatever kind of object this is at output.
        Subclasses of EditHandler should override this, invoking super(B, self).classes() to
        append more classes specific to the situation.
        """
        classes = []

        try:
            classes.append(self.classname)
        except AttributeError:
            # 'classname' is optional; subclasses may not define it.
            pass

        return classes

    def field_type(self):
        """
        The kind of field it is e.g boolean_field. Useful for better semantic markup of field display based on type
        """
        return ""

    def id_for_label(self):
        """
        The ID to be used as the 'for' attribute of any <label> elements that refer
        to this object but are rendered outside of it. Leave blank if this object does not render
        as a single input field.
        """
        return ""

    def render_as_object(self):
        """
        Render this object as it should appear within an ObjectList. Should not
        include the <h2> heading or help text - ObjectList will supply those
        """
        # by default, assume that the subclass provides a catch-all render() method
        return self.render()

    def render_as_field(self):
        """
        Render this object as it should appear within a <ul class="fields"> list item
        """
        # by default, assume that the subclass provides a catch-all render() method
        return self.render()

    def render_missing_fields(self):
        """
        Helper function: render all of the fields that are defined on the form but not "claimed" by
        any panels via required_fields. These fields are most likely to be hidden fields introduced
        by the forms framework itself, such as ORDER / DELETE fields on formset members.

        (If they aren't actually hidden fields, then they will appear as ugly unstyled / label-less fields
        outside of the panel furniture. But there's not much we can do about that.)
        """
        rendered_fields = self.required_fields()
        missing_fields_html = [
            text_type(self.form[field_name])
            for field_name in self.form.fields
            if field_name not in rendered_fields
        ]

        return mark_safe(''.join(missing_fields_html))

    def render_form_content(self):
        """
        Render this as an 'object', ensuring that all fields necessary for a valid form
        submission are included
        """
        return mark_safe(self.render_as_object() + self.render_missing_fields())

    # Return a list of comparator factories for the page-revision comparison view;
    # leaf panels override this to compare their bound field.
    @classmethod
    def get_comparison(cls):
        return []
class BaseCompositeEditHandler(EditHandler):
    """
    Abstract class for EditHandlers that manage a set of sub-EditHandlers.
    Concrete subclasses must attach a 'children' property
    """
    # Cache slot: collated widget overrides, memoised per concrete subclass.
    _widget_overrides = None

    @classmethod
    def widget_overrides(cls):
        if cls._widget_overrides is None:
            # build a collated version of all its children's widget lists
            widgets = {}
            for handler_class in cls.children:
                widgets.update(handler_class.widget_overrides())
            cls._widget_overrides = widgets
        return cls._widget_overrides

    # Cache slot: collated required field names, memoised per concrete subclass.
    _required_fields = None

    @classmethod
    def required_fields(cls):
        if cls._required_fields is None:
            fields = []
            for handler_class in cls.children:
                fields.extend(handler_class.required_fields())
            cls._required_fields = fields
        return cls._required_fields

    # Cache slot: collated formset definitions, memoised per concrete subclass.
    _required_formsets = None

    @classmethod
    def required_formsets(cls):
        if cls._required_formsets is None:
            formsets = {}
            for handler_class in cls.children:
                formsets.update(handler_class.required_formsets())
            cls._required_formsets = formsets
        return cls._required_formsets

    @classmethod
    def html_declarations(cls):
        return mark_safe(''.join([c.html_declarations() for c in cls.children]))

    def __init__(self, instance=None, form=None):
        super(BaseCompositeEditHandler, self).__init__(instance=instance, form=form)
        self.children = []
        for child in self.__class__.children:
            # Skip leaf (single-field) children whose field is excluded from,
            # or not included in, the form's Meta configuration.
            if not getattr(child, "children", None) and getattr(child, "field_name", None):
                if self.form._meta.exclude:
                    if child.field_name in self.form._meta.exclude:
                        continue
                if self.form._meta.fields:
                    if child.field_name not in self.form._meta.fields:
                        continue
            self.children.append(child(instance=self.instance, form=self.form))

    def render(self):
        return mark_safe(render_to_string(self.template, {
            'self': self
        }))

    @classmethod
    def get_comparison(cls):
        comparators = []

        for child in cls.children:
            comparators.extend(child.get_comparison())

        return comparators
class BaseFormEditHandler(BaseCompositeEditHandler):
    """
    Base class for edit handlers that can construct a form class for all their
    child edit handlers.
    """

    # The form class used as the base for constructing specific forms for this
    # edit handler. Subclasses can override this attribute to provide a form
    # with custom validation, for example. Custom forms must subclass
    # WagtailAdminModelForm
    base_form_class = None

    # Cache slot: the constructed form class, built once per bound handler class.
    _form_class = None

    @classmethod
    def get_form_class(cls, model):
        """
        Construct a form class that has all the fields and formsets named in
        the children of this edit handler.
        """
        if cls._form_class is None:
            # If a custom form class was passed to the EditHandler, use it.
            # Otherwise, use the base_form_class from the model.
            # If that is not defined, use WagtailAdminModelForm.
            model_form_class = getattr(model, 'base_form_class', WagtailAdminModelForm)
            base_form_class = cls.base_form_class or model_form_class

            cls._form_class = get_form_for_model(
                model,
                form_class=base_form_class,
                fields=cls.required_fields(),
                formsets=cls.required_formsets(),
                widgets=cls.widget_overrides())
        return cls._form_class
class BaseTabbedInterface(BaseFormEditHandler):
    # Renders each child edit handler as a separate tab in the admin UI.
    template = "wagtailadmin/edit_handlers/tabbed_interface.html"
class TabbedInterface(object):
    """Declarative grouping of edit handlers into tabs."""

    def __init__(self, children, base_form_class=None):
        self.children = children
        self.base_form_class = base_form_class

    def bind_to_model(self, model):
        """Return a BaseTabbedInterface subclass bound to ``model``."""
        bound_children = [child.bind_to_model(model) for child in self.children]
        attrs = {
            'model': model,
            'children': bound_children,
            'base_form_class': self.base_form_class,
        }
        return type(str('_TabbedInterface'), (BaseTabbedInterface,), attrs)
class BaseObjectList(BaseFormEditHandler):
    # Renders its children as one flat, headed list of objects.
    template = "wagtailadmin/edit_handlers/object_list.html"
class ObjectList(object):
    """Declarative top-level grouping of edit handlers rendered as one list."""

    def __init__(self, children, heading="", classname="",
                 base_form_class=None):
        self.children = children
        self.heading = heading
        self.classname = classname
        self.base_form_class = base_form_class

    def bind_to_model(self, model):
        """Return a BaseObjectList subclass bound to ``model``."""
        bound_children = [child.bind_to_model(model) for child in self.children]
        attrs = {
            'model': model,
            'children': bound_children,
            'heading': self.heading,
            'classname': self.classname,
            'base_form_class': self.base_form_class,
        }
        return type(str('_ObjectList'), (BaseObjectList,), attrs)
class BaseFieldRowPanel(BaseCompositeEditHandler):
    # Renders its children side by side on a single grid row.
    template = "wagtailadmin/edit_handlers/field_row_panel.html"
class FieldRowPanel(object):
    """Declarative panel that lays out its children side by side in one row."""

    def __init__(self, children, classname=""):
        self.children = children
        self.classname = classname

    def bind_to_model(self, model):
        """Return a BaseFieldRowPanel subclass bound to ``model``."""
        # Split the 12-column grid evenly between the children; only children
        # without an explicit col# class get the computed width appended.
        default_col = " col" + str(12 // len(self.children))
        for child in self.children:
            if not re.search(r'\bcol\d+\b', child.classname):
                child.classname += default_col

        bound_children = [child.bind_to_model(model) for child in self.children]
        return type(str('_FieldRowPanel'), (BaseFieldRowPanel,), {
            'model': model,
            'children': bound_children,
            'classname': self.classname,
        })
class BaseMultiFieldPanel(BaseCompositeEditHandler):
    template = "wagtailadmin/edit_handlers/multi_field_panel.html"

    def classes(self):
        """Inherited CSS class list plus the multi-field styling hook."""
        return super(BaseMultiFieldPanel, self).classes() + ["multi-field"]
class MultiFieldPanel(object):
    """Declarative grouping of several panels under one heading."""

    def __init__(self, children, heading="", classname=""):
        self.children = children
        self.heading = heading
        self.classname = classname

    def bind_to_model(self, model):
        """Return a BaseMultiFieldPanel subclass bound to ``model``."""
        bound_children = [child.bind_to_model(model) for child in self.children]
        return type(str('_MultiFieldPanel'), (BaseMultiFieldPanel,), {
            'model': model,
            'children': bound_children,
            'heading': self.heading,
            'classname': self.classname,
        })
class BaseFieldPanel(EditHandler):
    """Edit handler for a single form field, bound to one model field."""

    # Name of the template context variable carrying this panel.
    TEMPLATE_VAR = 'field_panel'

    @classmethod
    def widget_overrides(cls):
        """check if a specific widget has been defined for this field"""
        if hasattr(cls, 'widget'):
            return {cls.field_name: cls.widget}
        else:
            return {}

    def __init__(self, instance=None, form=None):
        super(BaseFieldPanel, self).__init__(instance=instance, form=form)
        # The BoundField for this panel's field on the supplied form.
        self.bound_field = self.form[self.field_name]

        self.heading = self.bound_field.label
        self.help_text = self.bound_field.help_text

    def classes(self):
        classes = super(BaseFieldPanel, self).classes()

        if self.bound_field.field.required:
            classes.append("required")
        if self.bound_field.errors:
            classes.append("error")

        classes.append(self.field_type())

        return classes

    def field_type(self):
        # e.g. CharField -> "char_field", used as a CSS hook.
        return camelcase_to_underscore(self.bound_field.field.__class__.__name__)

    def id_for_label(self):
        return self.bound_field.id_for_label

    object_template = "wagtailadmin/edit_handlers/single_field_panel.html"

    def render_as_object(self):
        return mark_safe(render_to_string(self.object_template, {
            'self': self,
            self.TEMPLATE_VAR: self,
            'field': self.bound_field,
        }))

    field_template = "wagtailadmin/edit_handlers/field_panel_field.html"

    def render_as_field(self):
        context = {
            'field': self.bound_field,
            'field_type': self.field_type(),
        }
        return mark_safe(render_to_string(self.field_template, context))

    @classmethod
    def required_fields(cls):
        return [cls.field_name]

    @classmethod
    def get_comparison_class(cls):
        """Return the comparison class for this field, or None to hide it."""
        # Hide fields with hidden widget
        widget_override = cls.widget_overrides().get(cls.field_name, None)
        if widget_override and widget_override.is_hidden:
            return

        try:
            field = cls.model._meta.get_field(cls.field_name)

            if field.choices:
                return compare.ChoiceFieldComparison

            if field.is_relation:
                if isinstance(field, TaggableManager):
                    return compare.TagsFieldComparison
                elif field.many_to_many:
                    return compare.M2MFieldComparison

                return compare.ForeignObjectComparison

            if isinstance(field, RichTextField):
                return compare.RichTextFieldComparison
        except FieldDoesNotExist:
            # Not a real model field (e.g. a form-only field); fall through
            # to the generic comparison.
            pass

        return compare.FieldComparison

    @classmethod
    def get_comparison(cls):
        comparator_class = cls.get_comparison_class()

        if comparator_class:
            field = cls.model._meta.get_field(cls.field_name)
            return [curry(comparator_class, field)]
        else:
            return []
class FieldPanel(object):
    """Declarative panel for a single model field."""

    def __init__(self, field_name, classname="", widget=None):
        self.field_name = field_name
        self.classname = classname
        self.widget = widget

    def bind_to_model(self, model):
        """Return a BaseFieldPanel subclass bound to ``model``."""
        attrs = {
            'model': model,
            'field_name': self.field_name,
            'classname': self.classname,
        }
        # Only carry an explicit widget through; otherwise BaseFieldPanel's
        # absence-of-attribute check keeps the form's default widget.
        if self.widget:
            attrs['widget'] = self.widget
        return type(str('_FieldPanel'), (BaseFieldPanel,), attrs)
class BaseRichTextFieldPanel(BaseFieldPanel):
    # Rich text content needs an HTML-aware comparison rather than plain text.
    @classmethod
    def get_comparison_class(cls):
        return compare.RichTextFieldComparison
class RichTextFieldPanel(object):
    """Declarative panel for a RichTextField."""

    def __init__(self, field_name):
        self.field_name = field_name

    def bind_to_model(self, model):
        """Return a BaseRichTextFieldPanel subclass bound to ``model``."""
        attrs = {
            'model': model,
            'field_name': self.field_name,
        }
        return type(str('_RichTextFieldPanel'), (BaseRichTextFieldPanel,), attrs)
class BaseChooserPanel(BaseFieldPanel):
    """
    Abstract superclass for panels that provide a modal interface for choosing (or creating)
    a database object such as an image, resulting in an ID that is used to populate
    a hidden foreign key input.

    Subclasses provide:
    * field_template (only required if the default template of field_panel_field.html is not usable)
    * object_type_name - something like 'image' which will be used as the var name
      for the object instance in the field_template
    """

    def get_chosen_item(self):
        """Return the currently chosen related object, or None if unset."""
        field = self.instance._meta.get_field(self.field_name)
        related_model = field.rel.model
        try:
            return getattr(self.instance, self.field_name)
        except related_model.DoesNotExist:
            # if the ForeignKey is null=False, Django decides to raise
            # a DoesNotExist exception here, rather than returning None
            # like every other unpopulated field type. Yay consistency!
            return None

    def render_as_field(self):
        instance_obj = self.get_chosen_item()
        context = {
            'field': self.bound_field,
            # Exposed under the subclass-chosen name, e.g. 'image' or 'page'.
            self.object_type_name: instance_obj,
            'is_chosen': bool(instance_obj),  # DEPRECATED - passed to templates for backwards compatibility only
        }
        return mark_safe(render_to_string(self.field_template, context))
class BasePageChooserPanel(BaseChooserPanel):
    """Chooser panel for foreign keys to Page, using the admin page chooser."""

    object_type_name = "page"

    @classmethod
    def widget_overrides(cls):
        return {cls.field_name: widgets.AdminPageChooser(
            target_models=cls.target_models(),
            can_choose_root=cls.can_choose_root)}

    @cached_classmethod
    def target_models(cls):
        """Resolve the configured page_type strings/classes to model classes.

        Falls back to the foreign key's target model when no page_type was
        declared. Cached per class by @cached_classmethod.
        """
        if cls.page_type:
            target_models = []

            for page_type in cls.page_type:
                try:
                    target_models.append(resolve_model_string(page_type))
                except LookupError:
                    raise ImproperlyConfigured(
                        "{0}.page_type must be of the form 'app_label.model_name', given {1!r}".format(
                            cls.__name__, page_type
                        )
                    )
                except ValueError:
                    raise ImproperlyConfigured(
                        "{0}.page_type refers to model {1!r} that has not been installed".format(
                            cls.__name__, page_type
                        )
                    )

            return target_models

        else:
            return [cls.model._meta.get_field(cls.field_name).rel.to]
class PageChooserPanel(object):
    """Declarative panel for a foreign key to Page, edited via the page chooser."""

    def __init__(self, field_name, page_type=None, can_choose_root=False):
        self.field_name = field_name

        if not page_type:
            page_type = []
        elif not isinstance(page_type, (list, tuple)):
            # Normalise a single string/model reference into a list.
            page_type = [page_type]

        self.page_type = page_type
        self.can_choose_root = can_choose_root

    def bind_to_model(self, model):
        """Return a BasePageChooserPanel subclass bound to ``model``."""
        return type(str('_PageChooserPanel'), (BasePageChooserPanel,), {
            'model': model,
            'field_name': self.field_name,
            'page_type': self.page_type,
            'can_choose_root': self.can_choose_root,
        })
class BaseInlinePanel(EditHandler):
    """Edit handler for a child-object relation, edited inline as a formset."""

    @classmethod
    def get_panel_definitions(cls):
        # Look for a panels definition in the InlinePanel declaration
        if cls.panels is not None:
            return cls.panels
        # Failing that, get it from the model
        else:
            return extract_panel_definitions_from_model_class(
                cls.related.related_model,
                exclude=[cls.related.field.name]
            )

    # Cache slot: the child edit handler class, built once per bound class.
    _child_edit_handler_class = None

    @classmethod
    def get_child_edit_handler_class(cls):
        if cls._child_edit_handler_class is None:
            panels = cls.get_panel_definitions()
            cls._child_edit_handler_class = MultiFieldPanel(
                panels,
                heading=cls.heading
            ).bind_to_model(cls.related.related_model)

        return cls._child_edit_handler_class

    @classmethod
    def required_formsets(cls):
        child_edit_handler_class = cls.get_child_edit_handler_class()
        return {
            cls.relation_name: {
                'fields': child_edit_handler_class.required_fields(),
                'widgets': child_edit_handler_class.widget_overrides(),
                'min_num': cls.min_num,
                'validate_min': cls.min_num is not None,
                'max_num': cls.max_num,
                'validate_max': cls.max_num is not None
            }
        }

    @classmethod
    def html_declarations(cls):
        return cls.get_child_edit_handler_class().html_declarations()

    @classmethod
    def get_comparison(cls):
        field = cls.model._meta.get_field(cls.relation_name)
        field_comparisons = []

        for panel in cls.get_panel_definitions():
            field_comparisons.extend(panel.bind_to_model(cls.related.related_model).get_comparison())

        return [curry(compare.ChildRelationComparison, field, field_comparisons)]

    def __init__(self, instance=None, form=None):
        super(BaseInlinePanel, self).__init__(instance=instance, form=form)

        # The ClusterForm exposes one formset per relation name.
        self.formset = form.formsets[self.__class__.relation_name]

        child_edit_handler_class = self.__class__.get_child_edit_handler_class()
        self.children = []
        for subform in self.formset.forms:
            # override the DELETE field to have a hidden input
            subform.fields['DELETE'].widget = forms.HiddenInput()

            # ditto for the ORDER field, if present
            if self.formset.can_order:
                subform.fields['ORDER'].widget = forms.HiddenInput()

            self.children.append(
                child_edit_handler_class(instance=subform.instance, form=subform)
            )

        # if this formset is valid, it may have been re-ordered; respect that
        # in case the parent form errored and we need to re-render
        if self.formset.can_order and self.formset.is_valid():
            self.children = sorted(self.children, key=lambda x: x.form.cleaned_data['ORDER'])

        # Template used by JS for adding new members client-side.
        empty_form = self.formset.empty_form
        empty_form.fields['DELETE'].widget = forms.HiddenInput()
        if self.formset.can_order:
            empty_form.fields['ORDER'].widget = forms.HiddenInput()

        self.empty_child = child_edit_handler_class(instance=empty_form.instance, form=empty_form)

    template = "wagtailadmin/edit_handlers/inline_panel.html"

    def render(self):
        formset = render_to_string(self.template, {
            'self': self,
            'can_order': self.formset.can_order,
        })
        js = self.render_js_init()
        return widget_with_script(formset, js)

    js_template = "wagtailadmin/edit_handlers/inline_panel.js"

    def render_js_init(self):
        return mark_safe(render_to_string(self.js_template, {
            'self': self,
            'can_order': self.formset.can_order,
        }))
class InlinePanel(object):
    """Declares an inline child-object editor (formset) for a named relation."""

    def __init__(self, relation_name, panels=None, classname='', label='',
                 help_text='', min_num=None, max_num=None):
        self.relation_name = relation_name
        self.panels = panels
        self.label = label
        self.help_text = help_text
        self.min_num = min_num
        self.max_num = max_num
        self.classname = classname

    def bind_to_model(self, model):
        """Return a BaseInlinePanel subclass bound to ``model``."""
        accessor = getattr(model, self.relation_name)
        # The descriptor attribute carrying the relation was renamed in Django 1.9.
        related = accessor.rel if django.VERSION >= (1, 9) else accessor.related

        return type(str('_InlinePanel'), (BaseInlinePanel,), {
            'model': model,
            'relation_name': self.relation_name,
            'related': related,
            'panels': self.panels,
            'heading': self.label,
            'help_text': self.help_text,
            # TODO: can we pick this out of the foreign key definition as an alternative?
            # (with a bit of help from the inlineformset object, as we do for label/heading)
            'min_num': self.min_num,
            'max_num': self.max_num,
            'classname': self.classname,
        })
# This allows users to include the publishing panel in their own per-model override
# without having to write these fields out by hand, potentially losing 'classname'
# and therefore the associated styling of the publishing panel
def PublishingPanel():
    """Return the standard 'Scheduled publishing' panel group.

    Provided as a factory so per-model panel overrides can include the
    publishing fields without re-declaring them, keeping the 'publishing'
    classname and its associated styling.
    """
    scheduling_row = FieldRowPanel(
        [
            FieldPanel('go_live_at'),
            FieldPanel('expire_at'),
        ],
        classname="label-above",
    )
    return MultiFieldPanel(
        [scheduling_row],
        ugettext_lazy('Scheduled publishing'),
        classname="publishing",
    )
# Now that we've defined EditHandlers, we can set up wagtailcore.Page to have some.
# Default panel configuration for the core Page model; concrete page types
# typically extend these lists rather than replacing them.
Page.content_panels = [
    FieldPanel('title', classname="full title"),
]

Page.promote_panels = [
    MultiFieldPanel([
        FieldPanel('slug'),
        FieldPanel('seo_title'),
        FieldPanel('show_in_menus'),
        FieldPanel('search_description'),
    ], ugettext_lazy('Common page configuration')),
]

Page.settings_panels = [
    PublishingPanel()
]

# Base form class used when constructing page edit forms (see get_edit_handler).
Page.base_form_class = WagtailAdminPageForm
@cached_classmethod
def get_edit_handler(cls):
    """
    Get the EditHandler to use in the Wagtail admin when editing this page type.

    An explicit ``edit_handler`` attribute on the page class takes precedence;
    otherwise a TabbedInterface is assembled from the class's content_panels,
    promote_panels and settings_panels, skipping any empty groups. The result
    is cached per class by @cached_classmethod.
    """
    if hasattr(cls, 'edit_handler'):
        return cls.edit_handler.bind_to_model(cls)

    # construct a TabbedInterface made up of content_panels, promote_panels
    # and settings_panels, skipping any which are empty
    tabs = []

    if cls.content_panels:
        tabs.append(ObjectList(cls.content_panels, heading=ugettext_lazy('Content')))
    if cls.promote_panels:
        tabs.append(ObjectList(cls.promote_panels, heading=ugettext_lazy('Promote')))
    if cls.settings_panels:
        tabs.append(ObjectList(cls.settings_panels, heading=ugettext_lazy('Settings'), classname="settings"))

    # Renamed from 'EditHandler' to avoid shadowing the EditHandler class
    # defined in this module.
    edit_handler = TabbedInterface(tabs, base_form_class=cls.base_form_class)
    return edit_handler.bind_to_model(cls)


Page.get_edit_handler = get_edit_handler
class BaseStreamFieldPanel(BaseFieldPanel):
    """Edit handler for a StreamField, delegating rendering to its block tree."""

    def classes(self):
        classes = super(BaseStreamFieldPanel, self).classes()
        classes.append("stream-field")

        # In case of a validation error, BlockWidget will take care of outputting the error on the
        # relevant sub-block, so we don't want the stream block as a whole to be wrapped in an 'error' class.
        if 'error' in classes:
            classes.remove("error")

        return classes

    @classmethod
    def html_declarations(cls):
        return cls.block_def.all_html_declarations()

    @classmethod
    def get_comparison_class(cls):
        return compare.StreamFieldComparison

    def id_for_label(self):
        # a StreamField may consist of many input fields, so it's not meaningful to
        # attach the label to any specific one
        return ""
class StreamFieldPanel(object):
    """Declarative panel for a StreamField."""

    def __init__(self, field_name):
        self.field_name = field_name

    def bind_to_model(self, model):
        """Return a BaseStreamFieldPanel subclass bound to ``model``."""
        stream_block = model._meta.get_field(self.field_name).stream_block
        return type(str('_StreamFieldPanel'), (BaseStreamFieldPanel,), {
            'model': model,
            'field_name': self.field_name,
            'block_def': stream_block
        })
| {
"content_hash": "4278918696d9fc370e13f31a7c66cef2",
"timestamp": "",
"source": "github",
"line_count": 865,
"max_line_length": 117,
"avg_line_length": 33.57225433526011,
"alnum_prop": 0.6256198347107438,
"repo_name": "Toshakins/wagtail",
"id": "68e02ce6fe1fcf5922fa53d495e840f6de736939",
"size": "29040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/wagtailadmin/edit_handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "181300"
},
{
"name": "HTML",
"bytes": "317210"
},
{
"name": "JavaScript",
"bytes": "124720"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "2941667"
},
{
"name": "Shell",
"bytes": "7997"
}
],
"symlink_target": ""
} |
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
from pyalgotrade import dataseries
class BarDataSeries(dataseries.SequenceDataSeries):
    """A DataSeries of :class:`pyalgotrade.bar.Bar` instances.

    Besides the bars themselves, a parallel sub-series is maintained for
    each bar component (open/close/high/low/volume/adjusted close).

    :param maxLen: The maximum number of values to hold. Once a bounded
        length is full, when new items are added, a corresponding number of
        items are discarded from the opposite end.
    :type maxLen: int.
    """
    def __init__(self, maxLen=dataseries.DEFAULT_MAX_LEN):
        dataseries.SequenceDataSeries.__init__(self, maxLen)
        # One sub-series per bar component, kept in lock-step with the
        # main series by appendWithDateTime().
        self.__open = dataseries.SequenceDataSeries(maxLen)
        self.__close = dataseries.SequenceDataSeries(maxLen)
        self.__high = dataseries.SequenceDataSeries(maxLen)
        self.__low = dataseries.SequenceDataSeries(maxLen)
        self.__volume = dataseries.SequenceDataSeries(maxLen)
        self.__adjClose = dataseries.SequenceDataSeries(maxLen)
        self.__useAdjusted = False
    def setUseAdjustedValues(self, useAdjusted):
        # Affects bars appended from now on and getPriceDataSeries().
        self.__useAdjusted = useAdjusted
    def append(self, bar):
        self.appendWithDateTime(bar.getDateTime(), bar)
    def appendWithDateTime(self, dateTime, bar):
        assert dateTime is not None
        assert bar is not None
        bar.setUseAdjustedValue(self.__useAdjusted)
        dataseries.SequenceDataSeries.appendWithDateTime(self, dateTime, bar)
        # Mirror each component of the bar into its own sub-series.
        for series, value in (
            (self.__open, bar.getOpen()),
            (self.__close, bar.getClose()),
            (self.__high, bar.getHigh()),
            (self.__low, bar.getLow()),
            (self.__volume, bar.getVolume()),
            (self.__adjClose, bar.getAdjClose()),
        ):
            series.appendWithDateTime(dateTime, value)
    def getOpenDataSeries(self):
        """Returns a :class:`pyalgotrade.dataseries.DataSeries` with the open prices."""
        return self.__open
    def getCloseDataSeries(self):
        """Returns a :class:`pyalgotrade.dataseries.DataSeries` with the close prices."""
        return self.__close
    def getHighDataSeries(self):
        """Returns a :class:`pyalgotrade.dataseries.DataSeries` with the high prices."""
        return self.__high
    def getLowDataSeries(self):
        """Returns a :class:`pyalgotrade.dataseries.DataSeries` with the low prices."""
        return self.__low
    def getVolumeDataSeries(self):
        """Returns a :class:`pyalgotrade.dataseries.DataSeries` with the volume."""
        return self.__volume
    def getAdjCloseDataSeries(self):
        """Returns a :class:`pyalgotrade.dataseries.DataSeries` with the adjusted close prices."""
        return self.__adjClose
    def getPriceDataSeries(self):
        """Returns a :class:`pyalgotrade.dataseries.DataSeries` with the close or adjusted close prices."""
        return self.__adjClose if self.__useAdjusted else self.__close
| {
"content_hash": "f5ac447d8325f3dcd6c9f0b3ebcbd3cc",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 133,
"avg_line_length": 41.67123287671233,
"alnum_prop": 0.690664036817883,
"repo_name": "cgqyh/pyalgotrade-mod",
"id": "bdc2c9ef1b689a9a9d7b9057b196c1ff12e3ad7d",
"size": "3656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyalgotrade/dataseries/bards.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1066824"
},
{
"name": "Shell",
"bytes": "504"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WorkspacesOperations(object):
    """WorkspacesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~batch_ai.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): this looks like AutoRest-generated client code — prefer
    # regenerating from the service specification over hand-editing.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        workspaces_list_options=None,  # type: Optional["_models.WorkspacesListOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.WorkspaceListResult"]
        """Gets a list of Workspaces associated with the given subscription.
        :param workspaces_list_options: Parameter group.
        :type workspaces_list_options: ~batch_ai.models.WorkspacesListOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either WorkspaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~batch_ai.models.WorkspaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkspaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Flatten the optional parameter group into its single value.
        _max_results = None
        if workspaces_list_options is not None:
            _max_results = workspaces_list_options.max_results
        api_version = "2018-05-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: build the full URL with query parameters.
            # Later pages: reuse the service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if _max_results is not None:
                    query_parameters['maxresults'] = self._serialize.query("max_results", _max_results, 'int', maximum=1000, minimum=1)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('WorkspaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.BatchAI/workspaces'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        workspaces_list_by_resource_group_options=None,  # type: Optional["_models.WorkspacesListByResourceGroupOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.WorkspaceListResult"]
        """Gets a list of Workspaces within the specified resource group.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param workspaces_list_by_resource_group_options: Parameter group.
        :type workspaces_list_by_resource_group_options: ~batch_ai.models.WorkspacesListByResourceGroupOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either WorkspaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~batch_ai.models.WorkspaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkspaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Flatten the optional parameter group into its single value.
        _max_results = None
        if workspaces_list_by_resource_group_options is not None:
            _max_results = workspaces_list_by_resource_group_options.max_results
        api_version = "2018-05-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: build the full URL with query parameters.
            # Later pages: reuse the service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if _max_results is not None:
                    query_parameters['maxresults'] = self._serialize.query("max_results", _max_results, 'int', maximum=1000, minimum=1)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('WorkspaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces'}  # type: ignore
    def _create_initial(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        parameters,  # type: "_models.WorkspaceCreateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.Workspace"]
        # Initial PUT of the create long-running operation; polled by begin_create().
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Workspace"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'WorkspaceCreateParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 202 Accepted means the create is still running; a body is only
        # returned with 200, so deserialized may be None here.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Workspace', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}'}  # type: ignore
    def begin_create(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        parameters,  # type: "_models.WorkspaceCreateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.Workspace"]
        """Creates a Workspace.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Workspace names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type workspace_name: str
        :param parameters: Workspace creation parameters.
        :type parameters: ~batch_ai.models.WorkspaceCreateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either Workspace or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~batch_ai.models.Workspace]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Workspace"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        # A continuation token lets the caller resume polling a previously
        # started operation instead of issuing the initial request again.
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('Workspace', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}'}  # type: ignore
    def update(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        parameters,  # type: "_models.WorkspaceUpdateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Workspace"
        """Updates properties of a Workspace.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Workspace names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type workspace_name: str
        :param parameters: Additional parameters for workspace update.
        :type parameters: ~batch_ai.models.WorkspaceUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Workspace, or the result of cls(response)
        :rtype: ~batch_ai.models.Workspace
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Workspace"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'WorkspaceUpdateParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Workspace', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Initial DELETE of the delete long-running operation; polled by begin_delete().
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-05-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a Workspace.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Workspace names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        # A continuation token lets the caller resume polling a previously
        # started operation instead of issuing the initial request again.
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Workspace"
        """Gets information about a Workspace.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Workspace names can only contain a
         combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
         from 1 through 64 characters long.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Workspace, or the result of cls(response)
        :rtype: ~batch_ai.models.Workspace
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Workspace"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-05-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Workspace', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}'}  # type: ignore
| {
"content_hash": "db43ba774631bc776c7ac84a15928d1d",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 179,
"avg_line_length": 49.86467486818981,
"alnum_prop": 0.6372960208649068,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2176b2fa1cb809bdff63030c38c55cfd589c2d30",
"size": "28840",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/batchai/azure-mgmt-batchai/azure/mgmt/batchai/operations/_workspaces_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import csv
from BeautifulSoup import BeautifulSoup
from constants import PATH, TEAMS
def scraping(year, category, division, team):
    """Extract per-player stat rows from one saved team stats page.

    :param year: season year used to locate the saved HTML file
    :param category: stat category ('b' for batting, 'p' for pitching)
    :param division: division number
    :param team: team identifier
    :return: list of rows; each row is [year, team, division] followed by
        the cell text of one <tr class="ststats"> row
    """
    path = PATH % (year, category, division, team)
    # Context manager guarantees the file handle is closed (the original
    # version opened the file and never closed it).
    with open(path) as f:
        soup = BeautifulSoup(f.read())
    players = []
    for row in soup.findAll('tr', {'class': 'ststats'}):
        cells = [cell.string for cell in row.findAll('td')]
        players.append([year, team, division] + cells)
    return players
def get_header(year, category, division, team):
    """Return the stat-table column headers for one saved team stats page.

    The headers scraped from the <th> cells are prefixed with the
    bookkeeping columns that scraping() adds in front of each row.

    :param year: season year used to locate the saved HTML file
    :param category: stat category ('b' for batting, 'p' for pitching)
    :param division: division number
    :param team: team identifier
    :return: list of header strings (UTF-8 encoded)
    """
    path = PATH % (year, category, division, team)
    # Context manager guarantees the file handle is closed (the original
    # version opened the file and never closed it).
    with open(path) as f:
        soup = BeautifulSoup(f.read())
    header = []
    for col in soup.find(id='stdivmaintbl').findAll('th'):
        # Join the cell's text fragments, skipping non-text children.
        h = ''.join([content.encode("utf8") for content in col.contents
                     if content.string])
        header.append(h.replace(" ", ""))
    # Prepend the columns that scraping() adds in front of each data row.
    return ["年度", "チーム", "部"] + header
if __name__ == "__main__":
years = range(2005, 2015)
stats_b = []
stats_p = []
header_flg = False
for year in years:
print year
for team in TEAMS[year]:
stats_b += scraping(year, 'b', 1, team)
stats_b += scraping(year, 'b', 2, team)
stats_p += scraping(year, 'p', 1, team)
if not header_flg:
header_b = get_header(year, 'b', 1, team)
header_p = get_header(year, 'p', 1, team)
header_b = map(lambda s: s.replace("|", "ー"), header_b)
header_p = map(lambda s: s.replace("|", "ー"), header_p)
writer = csv.writer(open("batting.csv", "w"), lineterminator="\n")
writer.writerow(header_b)
writer.writerows(stats_b)
writer = csv.writer(open("pitching.csv", "w"), lineterminator="\n")
writer.writerow(header_p)
writer.writerows(stats_p)
| {
"content_hash": "146c93e443115ce2a2d2fac005cbbebe",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 72,
"avg_line_length": 29.22222222222222,
"alnum_prop": 0.5736013036393265,
"repo_name": "who-you-me/baseball",
"id": "c5f1323855964024204a1102124464bbe55c45d1",
"size": "1889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3524"
}
],
"symlink_target": ""
} |
"""Version."""
# (major, minor, micro, release type, pre-release build, post-release build)
version_info = (4, 5, 1, 'final', 0, 0)
def _version():
"""
Get the version (PEP 440).
Version structure
(major, minor, micro, release type, pre-release build, post-release build)
Release names are named is such a way they are sortable and comparable with ease.
(alpha | beta | candidate | final)
- "final" should never have a pre-release build number
- pre-releases should have a pre-release build number greater than 0
- post-release is only applied if post-release build is greater than 0
"""
releases = {"alpha": 'a', "beta": 'b', "candidate": 'rc', "final": ''}
# Version info should be proper length
assert len(version_info) == 6
# Should be a valid release
assert version_info[3] in releases
# Pre-release releases should have a pre-release value
assert version_info[3] == 'final' or version_info[4] > 0
# Final should not have a pre-release value
assert version_info[3] != 'final' or version_info[4] == 0
main = '.'.join(str(x)for x in (version_info[0:2] if version_info[2] == 0 else version_info[0:3]))
prerel = releases[version_info[3]]
prerel += str(version_info[4]) if prerel else ''
postrel = '.post%d' % version_info[5] if version_info[5] > 0 else ''
return ''.join((main, prerel, postrel))
version = _version()
| {
"content_hash": "d07bb5bbdcc48b9c9b8b9145ac9c5422",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 102,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6468531468531469,
"repo_name": "nimzco/Environment",
"id": "e2a9fe2cc6059358f717fc1289ae7adbfe9e5963",
"size": "1430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Sublime/Packages/pymdownx/st3/pymdownx/__version__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "332445"
},
{
"name": "Python",
"bytes": "3101171"
},
{
"name": "Shell",
"bytes": "26630"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.logic import LogicManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-logic
# USAGE
python delete_a_workflow.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Delete the sample workflow using the Logic Management client.

    Credentials are taken from the environment (AZURE_CLIENT_ID,
    AZURE_TENANT_ID, AZURE_CLIENT_SECRET) via DefaultAzureCredential.
    """
    credential = DefaultAzureCredential()
    logic_client = LogicManagementClient(
        credential=credential,
        subscription_id="34adfa4f-cedf-4dc0-ba29-b6d1a69ab345",
    )
    # Issue the delete and echo the (empty) service response.
    result = logic_client.workflows.delete(
        resource_group_name="test-resource-group",
        workflow_name="test-workflow",
    )
    print(result)
# x-ms-original-file: specification/logic/resource-manager/Microsoft.Logic/stable/2019-05-01/examples/Workflows_Delete.json
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| {
"content_hash": "2ecc94aa9c69d11216e7c070cbda24ba",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 123,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.7282003710575139,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d33fd211fc5a2f076a9b804f14aff9b669490d11",
"size": "1546",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/logic/azure-mgmt-logic/generated_samples/delete_a_workflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class ErpInventoryCount(IdentifiedObject):
    """An inventory physical count organized by AssetModel.

    A count of a type of asset can also be accomplished through the
    association AssetModel inherits (from Document) to Asset.  This class
    lets ERP applications transfer an inventory count (cycle count or
    physical count) between ERP and the actual physical inventory location.
    """

    def __init__(self, status=None, MaterialItem=None, AssetModel=None, *args, **kw_args):
        """Initialises a new 'ErpInventoryCount' instance.

        @param status:
        @param MaterialItem:
        @param AssetModel:
        """
        self.status = status
        # Go through the property setters so the reverse collections on the
        # related MaterialItem/AssetModel are kept in sync from the start.
        self._MaterialItem = None
        self.MaterialItem = MaterialItem
        self._AssetModel = None
        self.AssetModel = AssetModel
        super(ErpInventoryCount, self).__init__(*args, **kw_args)

    # CIM reflection metadata used by the PyCIM (de)serializers.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["status", "MaterialItem", "AssetModel"]
    _many_refs = []

    status = None

    def getMaterialItem(self):
        """Return the associated MaterialItem (may be None)."""
        return self._MaterialItem

    def setMaterialItem(self, value):
        # First detach this count from the old MaterialItem's reverse list.
        previous = self._MaterialItem
        if previous is not None:
            previous._ErpInventoryCounts = [
                x for x in previous.ErpInventoryCounts if x != self]
        # Then attach to the new one, guarding against duplicates.
        self._MaterialItem = value
        current = self._MaterialItem
        if current is not None and self not in current._ErpInventoryCounts:
            current._ErpInventoryCounts.append(self)

    MaterialItem = property(getMaterialItem, setMaterialItem)

    def getAssetModel(self):
        """Return the associated AssetModel (may be None)."""
        return self._AssetModel

    def setAssetModel(self, value):
        # Mirror of setMaterialItem: unhook from the old AssetModel's
        # reverse list, then register on the new one without duplicating.
        previous = self._AssetModel
        if previous is not None:
            previous._ErpInventoryCounts = [
                x for x in previous.ErpInventoryCounts if x != self]
        self._AssetModel = value
        current = self._AssetModel
        if current is not None and self not in current._ErpInventoryCounts:
            current._ErpInventoryCounts.append(self)

    AssetModel = property(getAssetModel, setAssetModel)
| {
"content_hash": "8dd2b888078befe72af8f39cfceea809",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 733,
"avg_line_length": 41.265625,
"alnum_prop": 0.6815600151457781,
"repo_name": "rwl/PyCIM",
"id": "47da5a765831bf01dd19cdf89b45993246671f06",
"size": "3741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Informative/InfERPSupport/ErpInventoryCount.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
from supybot.test import *
import copy
import pickle
import supybot.conf as conf
import supybot.irclib as irclib
import supybot.ircmsgs as ircmsgs
# The test framework used to provide these, but not it doesn't. We'll add
# messages to as we find bugs (if indeed we find bugs).
msgs = []  # IrcMsg fixtures; populated as bugs are found (see comment above).
rawmsgs = []  # Raw protocol-line counterparts of msgs.
class IrcMsgQueueTestCase(SupyTestCase):
    """Exercises irclib.IrcMsgQueue: length/containment bookkeeping, FIFO
    ordering within a priority class, and the priority rules between
    message types (MODE/KICK/TOPIC/PING outrank PRIVMSG/NOTICE)."""
    # Shared IrcMsg fixtures reused by the tests below.
    mode = ircmsgs.op('#foo', 'jemfinch')
    msg = ircmsgs.privmsg('#foo', 'hey, you')
    msgs = [ircmsgs.privmsg('#foo', str(i)) for i in range(10)]
    kick = ircmsgs.kick('#foo', 'PeterB')
    pong = ircmsgs.pong('123')
    ping = ircmsgs.ping('123')
    topic = ircmsgs.topic('#foo')
    notice = ircmsgs.notice('jemfinch', 'supybot here')
    join = ircmsgs.join('#foo')
    who = ircmsgs.who('#foo')
    def testInit(self):
        # The queue may be seeded from an iterable of messages.
        q = irclib.IrcMsgQueue([self.msg, self.topic, self.ping])
        self.assertEqual(len(q), 3)
    def testLen(self):
        # len() must track every enqueue and dequeue exactly.
        q = irclib.IrcMsgQueue()
        q.enqueue(self.msg)
        self.assertEqual(len(q), 1)
        q.enqueue(self.mode)
        self.assertEqual(len(q), 2)
        q.enqueue(self.kick)
        self.assertEqual(len(q), 3)
        q.enqueue(self.topic)
        self.assertEqual(len(q), 4)
        q.dequeue()
        self.assertEqual(len(q), 3)
        q.dequeue()
        self.assertEqual(len(q), 2)
        q.dequeue()
        self.assertEqual(len(q), 1)
        q.dequeue()
        self.assertEqual(len(q), 0)
    def testContains(self):
        # With duplicates enqueued, membership stays true until the last
        # copy has been dequeued.
        q = irclib.IrcMsgQueue()
        q.enqueue(self.msg)
        q.enqueue(self.msg)
        q.enqueue(self.msg)
        self.failUnless(self.msg in q)
        q.dequeue()
        self.failUnless(self.msg in q)
        q.dequeue()
        self.failUnless(self.msg in q)
        q.dequeue()
        self.failIf(self.msg in q)
    def testRepr(self):
        q = irclib.IrcMsgQueue()
        self.assertEqual(repr(q), 'IrcMsgQueue([])')
        q.enqueue(self.msg)
        # repr() of a non-empty queue must never raise.
        try:
            repr(q)
        except Exception as e:
            self.fail('repr(q) raised an exception: %s' %
                      utils.exnToString(e))
    def testEmpty(self):
        # A fresh queue is falsy.
        q = irclib.IrcMsgQueue()
        self.failIf(q)
    def testEnqueueDequeue(self):
        # Same-priority messages must come out in FIFO order.
        q = irclib.IrcMsgQueue()
        q.enqueue(self.msg)
        self.failUnless(q)
        self.assertEqual(self.msg, q.dequeue())
        self.failIf(q)
        q.enqueue(self.msg)
        q.enqueue(self.notice)
        self.assertEqual(self.msg, q.dequeue())
        self.assertEqual(self.notice, q.dequeue())
        for msg in self.msgs:
            q.enqueue(msg)
        for msg in self.msgs:
            self.assertEqual(msg, q.dequeue())
    def testPrioritizing(self):
        # Channel-management commands (KICK/MODE) and PING must be
        # dequeued before ordinary PRIVMSGs, regardless of enqueue order.
        q = irclib.IrcMsgQueue()
        q.enqueue(self.msg)
        q.enqueue(self.mode)
        self.assertEqual(self.mode, q.dequeue())
        self.assertEqual(self.msg, q.dequeue())
        q.enqueue(self.msg)
        q.enqueue(self.kick)
        self.assertEqual(self.kick, q.dequeue())
        self.assertEqual(self.msg, q.dequeue())
        q.enqueue(self.ping)
        q.enqueue(self.msgs[0])
        q.enqueue(self.kick)
        q.enqueue(self.msgs[1])
        q.enqueue(self.mode)
        self.assertEqual(self.kick, q.dequeue())
        self.assertEqual(self.mode, q.dequeue())
        self.assertEqual(self.ping, q.dequeue())
        self.assertEqual(self.msgs[0], q.dequeue())
        self.assertEqual(self.msgs[1], q.dequeue())
    def testNoIdenticals(self):
        # With duplicate-dropping enabled in the registry, enqueueing the
        # same message twice yields a single queued copy.
        configVar = conf.supybot.protocols.irc.queuing.duplicates
        original = configVar()
        try:
            configVar.setValue(True)
            q = irclib.IrcMsgQueue()
            q.enqueue(self.msg)
            q.enqueue(self.msg)
            self.assertEqual(self.msg, q.dequeue())
            self.failIf(q)
        finally:
            configVar.setValue(original)
    def testJoinBeforeWho(self):
        # JOIN must be sent before the WHO for the same channel.
        q = irclib.IrcMsgQueue()
        q.enqueue(self.join)
        q.enqueue(self.who)
        self.assertEqual(self.join, q.dequeue())
        self.assertEqual(self.who, q.dequeue())
        ## q.enqueue(self.who)
        ## q.enqueue(self.join)
        ## self.assertEqual(self.join, q.dequeue())
        ## self.assertEqual(self.who, q.dequeue())
    def testTopicBeforePrivmsg(self):
        q = irclib.IrcMsgQueue()
        q.enqueue(self.msg)
        q.enqueue(self.topic)
        self.assertEqual(self.topic, q.dequeue())
        self.assertEqual(self.msg, q.dequeue())
    def testModeBeforePrivmsg(self):
        # MODE wins in both enqueue orders.
        q = irclib.IrcMsgQueue()
        q.enqueue(self.msg)
        q.enqueue(self.mode)
        self.assertEqual(self.mode, q.dequeue())
        self.assertEqual(self.msg, q.dequeue())
        q.enqueue(self.mode)
        q.enqueue(self.msg)
        self.assertEqual(self.mode, q.dequeue())
        self.assertEqual(self.msg, q.dequeue())
class ChannelStateTestCase(SupyTestCase):
    """ChannelState must survive pickling/deep-copying as an independent
    object and must route mode-prefixed nicks into the right user sets."""
    def testPickleCopy(self):
        c = irclib.ChannelState()
        self.assertEqual(pickle.loads(pickle.dumps(c)), c)
        c.addUser('jemfinch')
        c1 = pickle.loads(pickle.dumps(c))
        self.assertEqual(c, c1)
        # The unpickled copy must be independent of the original.
        c.removeUser('jemfinch')
        self.failIf('jemfinch' in c.users)
        self.failUnless('jemfinch' in c1.users)
    def testCopy(self):
        c = irclib.ChannelState()
        c.addUser('jemfinch')
        c1 = copy.deepcopy(c)
        # Mutating the original must not affect the deep copy.
        c.removeUser('jemfinch')
        self.failIf('jemfinch' in c.users)
        self.failUnless('jemfinch' in c1.users)
    def testAddUser(self):
        # Prefix characters select the mode set: '+' voice, '%' halfop,
        # '@' op; a bare nick lands only in the general users set.
        c = irclib.ChannelState()
        c.addUser('foo')
        self.failUnless('foo' in c.users)
        self.failIf('foo' in c.ops)
        self.failIf('foo' in c.voices)
        self.failIf('foo' in c.halfops)
        c.addUser('+bar')
        self.failUnless('bar' in c.users)
        self.failUnless('bar' in c.voices)
        self.failIf('bar' in c.ops)
        self.failIf('bar' in c.halfops)
        c.addUser('%baz')
        self.failUnless('baz' in c.users)
        self.failUnless('baz' in c.halfops)
        self.failIf('baz' in c.voices)
        self.failIf('baz' in c.ops)
        c.addUser('@quuz')
        self.failUnless('quuz' in c.users)
        self.failUnless('quuz' in c.ops)
        self.failIf('quuz' in c.halfops)
        self.failIf('quuz' in c.voices)
class IrcStateTestCase(SupyTestCase):
    """IrcState must mirror server-side state: channel membership, user
    modes, bounded message history, and ISUPPORT (004/005) parsing."""
    class FakeIrc:
        # Minimal stand-in for an Irc object: only identity attributes.
        nick = 'nick'
        prefix = 'nick!user@host'
    irc = FakeIrc()
    def testKickRemovesChannel(self):
        st = irclib.IrcState()
        st.channels['#foo'] = irclib.ChannelState()
        # Being kicked ourselves must drop the channel from state.
        m = ircmsgs.kick('#foo', self.irc.nick, prefix=self.irc.prefix)
        st.addMsg(self.irc, m)
        self.failIf('#foo' in st.channels)
    def testAddMsgRemovesOpsProperly(self):
        st = irclib.IrcState()
        st.channels['#foo'] = irclib.ChannelState()
        st.channels['#foo'].ops.add('bar')
        m = ircmsgs.mode('#foo', ('-o', 'bar'))
        st.addMsg(self.irc, m)
        self.failIf('bar' in st.channels['#foo'].ops)
    def testNickChangesChangeChannelUsers(self):
        # A NICK change must rename the user in the channel's sets while
        # preserving op status.
        st = irclib.IrcState()
        st.channels['#foo'] = irclib.ChannelState()
        st.channels['#foo'].addUser('@bar')
        self.failUnless('bar' in st.channels['#foo'].users)
        self.failUnless(st.channels['#foo'].isOp('bar'))
        st.addMsg(self.irc, ircmsgs.IrcMsg(':bar!asfd@asdf.com NICK baz'))
        self.failIf('bar' in st.channels['#foo'].users)
        self.failIf(st.channels['#foo'].isOp('bar'))
        self.failUnless('baz' in st.channels['#foo'].users)
        self.failUnless(st.channels['#foo'].isOp('baz'))
    def testHistory(self):
        # Meaningless until the msgs fixture list is populated.
        if len(msgs) < 10:
            return
        maxHistoryLength = conf.supybot.protocols.irc.maxHistoryLength
        with maxHistoryLength.context(10):
            state = irclib.IrcState()
            for msg in msgs:
                try:
                    state.addMsg(self.irc, msg)
                except Exception:
                    pass
            # History is a bounded buffer keeping only the newest messages.
            self.failIf(len(state.history) > maxHistoryLength())
            self.assertEqual(len(state.history), maxHistoryLength())
            self.assertEqual(list(state.history),
                             msgs[len(msgs) - maxHistoryLength():])
    def testWasteland005(self):
        state = irclib.IrcState()
        # Here we're testing if PREFIX works without the (ov) there.
        state.addMsg(self.irc, ircmsgs.IrcMsg(':desolate.wasteland.org 005 jemfinch NOQUIT WATCH=128 SAFELIST MODES=6 MAXCHANNELS=10 MAXBANS=100 NICKLEN=30 TOPICLEN=307 KICKLEN=307 CHANTYPES=&# PREFIX=@+ NETWORK=DALnet SILENCE=10 :are available on this server'))
        self.assertEqual(state.supported['prefix']['o'], '@')
        self.assertEqual(state.supported['prefix']['v'], '+')
    def testIRCNet005(self):
        state = irclib.IrcState()
        # Testing IRCNet's misuse of MAXBANS
        state.addMsg(self.irc, ircmsgs.IrcMsg(':irc.inet.tele.dk 005 adkwbot WALLCHOPS KNOCK EXCEPTS INVEX MODES=4 MAXCHANNELS=20 MAXBANS=beI:100 MAXTARGETS=4 NICKLEN=9 TOPICLEN=120 KICKLEN=90 :are supported by this server'))
        self.assertEqual(state.supported['maxbans'], 100)
    def testSupportedUmodes(self):
        # 004 carries the server's user modes and channel modes.
        state = irclib.IrcState()
        state.addMsg(self.irc, ircmsgs.IrcMsg(':coulomb.oftc.net 004 testnick coulomb.oftc.net hybrid-7.2.2+oftc1.6.8 CDGPRSabcdfgiklnorsuwxyz biklmnopstveI bkloveI'))
        self.assertEqual(state.supported['umodes'],
                         frozenset('CDGPRSabcdfgiklnorsuwxyz'))
        self.assertEqual(state.supported['chanmodes'],
                         frozenset('biklmnopstveI'))
    def testEmptyTopic(self):
        # A TOPIC with no argument must not crash state tracking.
        state = irclib.IrcState()
        state.addMsg(self.irc, ircmsgs.topic('#foo'))
    def testPickleCopy(self):
        state = irclib.IrcState()
        self.assertEqual(state, pickle.loads(pickle.dumps(state)))
        for msg in msgs:
            try:
                state.addMsg(self.irc, msg)
            except Exception:
                pass
        self.assertEqual(state, pickle.loads(pickle.dumps(state)))
    def testCopy(self):
        state = irclib.IrcState()
        self.assertEqual(state, state.copy())
        for msg in msgs:
            try:
                state.addMsg(self.irc, msg)
            except Exception:
                pass
        self.assertEqual(state, state.copy())
    def testCopyCopiesChannels(self):
        # copy() must duplicate the channels dict, not share it.
        state = irclib.IrcState()
        stateCopy = state.copy()
        state.channels['#foo'] = None
        self.failIf('#foo' in stateCopy.channels)
    def testJoin(self):
        st = irclib.IrcState()
        st.addMsg(self.irc, ircmsgs.join('#foo', prefix=self.irc.prefix))
        self.failUnless('#foo' in st.channels)
        self.failUnless(self.irc.nick in st.channels['#foo'].users)
        st.addMsg(self.irc, ircmsgs.join('#foo', prefix='foo!bar@baz'))
        self.failUnless('foo' in st.channels['#foo'].users)
        # A QUIT removes the user from the live state but not from copies.
        st2 = st.copy()
        st.addMsg(self.irc, ircmsgs.quit(prefix='foo!bar@baz'))
        self.failIf('foo' in st.channels['#foo'].users)
        self.failUnless('foo' in st2.channels['#foo'].users)
    def testEq(self):
        # Two states fed identical messages must stay equal throughout.
        state1 = irclib.IrcState()
        state2 = irclib.IrcState()
        self.assertEqual(state1, state2)
        for msg in msgs:
            try:
                state1.addMsg(self.irc, msg)
                state2.addMsg(self.irc, msg)
                self.assertEqual(state1, state2)
            except Exception:
                pass
    def testHandlesModes(self):
        # +o/-o, +v/-v, +h/-h must move nicks in and out of the mode sets.
        st = irclib.IrcState()
        st.addMsg(self.irc, ircmsgs.join('#foo', prefix=self.irc.prefix))
        self.failIf('bar' in st.channels['#foo'].ops)
        st.addMsg(self.irc, ircmsgs.op('#foo', 'bar'))
        self.failUnless('bar' in st.channels['#foo'].ops)
        st.addMsg(self.irc, ircmsgs.deop('#foo', 'bar'))
        self.failIf('bar' in st.channels['#foo'].ops)
        self.failIf('bar' in st.channels['#foo'].voices)
        st.addMsg(self.irc, ircmsgs.voice('#foo', 'bar'))
        self.failUnless('bar' in st.channels['#foo'].voices)
        st.addMsg(self.irc, ircmsgs.devoice('#foo', 'bar'))
        self.failIf('bar' in st.channels['#foo'].voices)
        self.failIf('bar' in st.channels['#foo'].halfops)
        st.addMsg(self.irc, ircmsgs.halfop('#foo', 'bar'))
        self.failUnless('bar' in st.channels['#foo'].halfops)
        st.addMsg(self.irc, ircmsgs.dehalfop('#foo', 'bar'))
        self.failIf('bar' in st.channels['#foo'].halfops)
    def testDoModeOnlyChannels(self):
        # A user-mode MODE (no channel argument) must not raise.
        st = irclib.IrcState()
        self.assert_(st.addMsg(self.irc, ircmsgs.IrcMsg('MODE foo +i')) or 1)
class IrcTestCase(SupyTestCase):
    """End-to-end tests of the Irc object: connection registration,
    capability negotiation, queue ordering, and message-length limits."""
    def setUp(self):
        self.irc = irclib.Irc('test')
        # Drain and verify the registration sequence (CAP LS, NICK, USER),
        # then complete a CAP negotiation round-trip.
        #m = self.irc.takeMsg()
        #self.failUnless(m.command == 'PASS', 'Expected PASS, got %r.' % m)
        m = self.irc.takeMsg()
        self.failUnless(m.command == 'CAP', 'Expected CAP, got %r.' % m)
        self.failUnless(m.args == ('LS', '302'), 'Expected CAP LS 302, got %r.' % m)
        m = self.irc.takeMsg()
        self.failUnless(m.command == 'NICK', 'Expected NICK, got %r.' % m)
        m = self.irc.takeMsg()
        self.failUnless(m.command == 'USER', 'Expected USER, got %r.' % m)
        # TODO
        # Multi-line CAP LS reply; the extra '*' arg marks a continuation.
        self.irc.feedMsg(ircmsgs.IrcMsg(command='CAP',
            args=('*', 'LS', '*', 'account-tag multi-prefix')))
        self.irc.feedMsg(ircmsgs.IrcMsg(command='CAP',
            args=('*', 'LS', 'extended-join')))
        m = self.irc.takeMsg()
        self.failUnless(m.command == 'CAP', 'Expected CAP, got %r.' % m)
        self.assertEqual(m.args[0], 'REQ', m)
        # NOTE: Capabilities are requested in alphabetic order, because
        # sets are unordered, and their "order" is nondeterministic.
        self.assertEqual(m.args[1], 'account-tag extended-join multi-prefix')
        self.irc.feedMsg(ircmsgs.IrcMsg(command='CAP',
            args=('*', 'ACK', 'account-tag multi-prefix extended-join')))
        m = self.irc.takeMsg()
        self.failUnless(m.command == 'CAP', 'Expected CAP, got %r.' % m)
        self.assertEqual(m.args, ('END',), m)
        m = self.irc.takeMsg()
        self.failUnless(m is None, m)
    def testPingResponse(self):
        # A server PING must be answered with a matching PONG.
        self.irc.feedMsg(ircmsgs.ping('123'))
        self.assertEqual(ircmsgs.pong('123'), self.irc.takeMsg())
    def test433Response(self):
        # 433 (nick in use) must trigger a NICK with an alternative nick.
        # This is necessary; it won't change nick if irc.originalName==irc.nick
        self.irc.nick = 'somethingElse'
        self.irc.feedMsg(ircmsgs.IrcMsg('433 * %s :Nickname already in use.' %\
                                        self.irc.nick))
        msg = self.irc.takeMsg()
        self.failUnless(msg.command == 'NICK' and msg.args[0] != self.irc.nick)
        self.irc.feedMsg(ircmsgs.IrcMsg('433 * %s :Nickname already in use.' %\
                                        self.irc.nick))
        msg = self.irc.takeMsg()
        self.failUnless(msg.command == 'NICK' and msg.args[0] != self.irc.nick)
    def testSendBeforeQueue(self):
        # sendMsg() must jump ahead of anything merely queueMsg()'d.
        while self.irc.takeMsg() is not None:
            self.irc.takeMsg()
        self.irc.queueMsg(ircmsgs.IrcMsg('NOTICE #foo bar'))
        self.irc.sendMsg(ircmsgs.IrcMsg('PRIVMSG #foo yeah!'))
        msg = self.irc.takeMsg()
        self.failUnless(msg.command == 'PRIVMSG')
        msg = self.irc.takeMsg()
        self.failUnless(msg.command == 'NOTICE')
    def testNoMsgLongerThan512(self):
        # Outgoing lines must respect the 512-byte IRC line limit.
        self.irc.queueMsg(ircmsgs.privmsg('whocares', 'x'*1000))
        msg = self.irc.takeMsg()
        self.failUnless(len(msg) <= 512, 'len(msg) was %s' % len(msg))
    def testReset(self):
        for msg in msgs:
            try:
                self.irc.feedMsg(msg)
            except:
                pass
        self.irc.reset()
        # reset() must clear history, channel state, and the ping flag.
        self.failIf(self.irc.state.history)
        self.failIf(self.irc.state.channels)
        self.failIf(self.irc.outstandingPing)
    def testHistory(self):
        # Fed messages accumulate in state.history in order.
        self.irc.reset()
        msg1 = ircmsgs.IrcMsg('PRIVMSG #linux :foo bar baz!')
        self.irc.feedMsg(msg1)
        self.assertEqual(self.irc.state.history[0], msg1)
        msg2 = ircmsgs.IrcMsg('JOIN #sourcereview')
        self.irc.feedMsg(msg2)
        self.assertEqual(list(self.irc.state.history), [msg1, msg2])
class IrcCallbackTestCase(SupyTestCase):
    """Tests for the IrcCallback plugin-hook base class."""
    class FakeIrc:
        # Bare object standing in for an Irc instance.
        pass
    irc = FakeIrc()
    def testName(self):
        # name() defaults to the class name but may be overridden.
        class UnnamedIrcCallback(irclib.IrcCallback):
            pass
        unnamed = UnnamedIrcCallback()
        class NamedIrcCallback(irclib.IrcCallback):
            myName = 'foobar'
            def name(self):
                return self.myName
        named = NamedIrcCallback()
        self.assertEqual(unnamed.name(), unnamed.__class__.__name__)
        self.assertEqual(named.name(), named.myName)
    def testDoCommand(self):
        # Each fed message must dispatch to the matching doCOMMAND method.
        def makeCommand(msg):
            return 'do' + msg.command.capitalize()
        class DoCommandCatcher(irclib.IrcCallback):
            def __init__(self):
                self.L = []
            def __getattr__(self, attr):
                # Record every attribute looked up and return a no-op.
                self.L.append(attr)
                return lambda *args: None
        doCommandCatcher = DoCommandCatcher()
        for msg in msgs:
            doCommandCatcher(self.irc, msg)
        commands = list(map(makeCommand, msgs))
        self.assertEqual(doCommandCatcher.L, commands)
    def testFirstCommands(self):
        # On connect the bot must send CAP LS, (optional PASS,) NICK, USER.
        try:
            originalNick = conf.supybot.nick()
            originalUser = conf.supybot.user()
            originalPassword = conf.supybot.networks.test.password()
            nick = 'nick'
            conf.supybot.nick.setValue(nick)
            user = 'user any user'
            conf.supybot.user.setValue(user)
            expected = [
                ircmsgs.IrcMsg(command='CAP', args=('LS', '302')),
                ircmsgs.nick(nick),
                ircmsgs.user('limnoria', user),
            ]
            irc = irclib.Irc('test')
            msgs = [irc.takeMsg()]
            while msgs[-1] is not None:
                msgs.append(irc.takeMsg())
            msgs.pop()
            self.assertEqual(msgs, expected)
            # With a network password configured, PASS is inserted after CAP.
            password = 'password'
            conf.supybot.networks.test.password.setValue(password)
            irc = irclib.Irc('test')
            msgs = [irc.takeMsg()]
            while msgs[-1] is not None:
                msgs.append(irc.takeMsg())
            msgs.pop()
            expected.insert(1, ircmsgs.password(password))
            self.assertEqual(msgs, expected)
        finally:
            # Restore the registry values we clobbered above.
            conf.supybot.nick.setValue(originalNick)
            conf.supybot.user.setValue(originalUser)
            conf.supybot.networks.test.password.setValue(originalPassword)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| {
"content_hash": "a2031bf7e5b042f1c96786e44011e6c0",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 262,
"avg_line_length": 36.98031496062992,
"alnum_prop": 0.5858085808580858,
"repo_name": "Ban3/Limnoria",
"id": "ca78d6285f0fa88959d349d8c1cf85fdfacc58f7",
"size": "20374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_irclib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "864"
},
{
"name": "Python",
"bytes": "2513657"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
} |
import time
import pymongo
from mongotools.pubsub import Channel
cli = pymongo.MongoClient()  # local MongoDB on the default host/port
chan = Channel(cli.test, 'mychannel')  # pub/sub channel backed by the 'test' db
chan.ensure_channel()  # make sure the channel's backing collection exists
def printer(chan, msg):
    # Subscriber callback: dump every received message to stdout.
    # (Python 2 print statement -- this script is Python 2 only.)
    print chan, msg
chan.sub('foo', printer)
chan.sub('bar', printer)
# Poll forever, dispatching ready messages to the subscribed callbacks.
# NOTE(review): 'await' as a keyword argument is Python 2 only; it is a
# reserved word from Python 3.7 on.
while True:
    chan.handle_ready(await=True)
    time.sleep(0.1)
| {
"content_hash": "898e6935dcf19af260caee7b4ddf2d7b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 37,
"avg_line_length": 17.88888888888889,
"alnum_prop": 0.7080745341614907,
"repo_name": "rick446/MongoTools",
"id": "1f014564175a6c09df52267e9ae2ba4cd1e1cc36",
"size": "335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongotools/examples/pubsub/simple_sub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71842"
}
],
"symlink_target": ""
} |
from .resource_update import ResourceUpdate
class DiskUpdate(ResourceUpdate):
    """Disk update resource.

    :param tags: Resource tags
    :type tags: dict[str, str]
    :param sku:
    :type sku: ~azure.mgmt.compute.v2018_04_01.models.DiskSku
    :param os_type: The operating system type. Possible values include:
     'Windows', 'Linux'
    :type os_type: str or
     ~azure.mgmt.compute.v2018_04_01.models.OperatingSystemTypes
    :param disk_size_gb: Mandatory when creationData.createOption is Empty,
     in which case it gives the size of the VHD to create. For any other
     option its presence requests a resize; resizes are only allowed while
     the disk is not attached to a running VM and may only grow the disk.
    :type disk_size_gb: int
    :param encryption_settings: Encryption settings for disk or snapshot
    :type encryption_settings:
     ~azure.mgmt.compute.v2018_04_01.models.EncryptionSettings
    """

    # Wire-format mapping consumed by the msrest (de)serializer.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'DiskSku'},
        'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'},
        'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
        'encryption_settings': {'key': 'properties.encryptionSettings', 'type': 'EncryptionSettings'},
    }

    def __init__(self, **kwargs):
        super(DiskUpdate, self).__init__(**kwargs)
        # Pull each disk-specific property from kwargs, defaulting to None.
        for prop in ('os_type', 'disk_size_gb', 'encryption_settings'):
            setattr(self, prop, kwargs.get(prop, None))
| {
"content_hash": "ac0a0ea32d804ccd0782ae1b7e13dc95",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 102,
"avg_line_length": 43.60526315789474,
"alnum_prop": 0.660229330114665,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "7364769892bfd2ea7d6d763610b54b5a28f99ee1",
"size": "2131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/disk_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from model.group import Group # Модель группы контактов адресной книги
import pytest # Исполнение тестов
# Uses test data supplied from a py-file (the data_groups fixture).
def test_add_group_data(app, db, check_ui, data_groups):
    with pytest.allure.step('Given a group list'):  # Allure report step marker
        old_groups = db.get_group_list()
        group = data_groups
    with pytest.allure.step('When I add a group %s to the list' % group):  # Allure report step marker
        app.group.create(group)
    with pytest.allure.step('Then the new group list is equal to the old list with the added group'):  # Allure report step marker
        new_groups = db.get_group_list()
        old_groups.append(group)
        assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
    if check_ui:  # Optionally cross-check the database result against the UI
        with pytest.allure.step('Also check UI'):  # Allure report step marker
            assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
# Uses test data supplied from a json-file (the json_groups fixture).
def test_add_group_json(app, db, check_ui, json_groups):
    with pytest.allure.step('Given a group list'):  # Allure report step marker
        old_groups = db.get_group_list()
        group = json_groups
    with pytest.allure.step('When I add a group %s to the list' % group):  # Allure report step marker
        app.group.create(group)
    with pytest.allure.step('Then the new group list is equal to the old list with the added group'):  # Allure report step marker
        new_groups = db.get_group_list()
        old_groups.append(group)
        assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
    if check_ui:  # Optionally cross-check the database result against the UI
        with pytest.allure.step('Also check UI'):  # Allure report step marker
            assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| {
"content_hash": "7603d923401ddad06392873eb4308d30",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 153,
"avg_line_length": 70.32352941176471,
"alnum_prop": 0.6035131744040151,
"repo_name": "NovikovMA/python_training",
"id": "d442085f67ed142009260218adf0340ce86680f0",
"size": "2801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_add_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "1182"
},
{
"name": "Python",
"bytes": "154097"
},
{
"name": "RobotFramework",
"bytes": "1345"
}
],
"symlink_target": ""
} |
import json
from DataBaseService import DataBaseService
class APIManager:
    """Thin API facade: validates JSON payloads and delegates to the DB layer."""
    def __init__(self):
        # Single shared database-service instance for all API calls.
        self.db = DataBaseService()
    def registerNewClient(self, info):
        """Register a new user from a JSON payload string.

        Returns the DB layer's status: 0 on success (user inserted); any
        other value means the username/email check failed.
        """
        try:
            newUserData = json.loads(info)
            username = newUserData['username']
            firstName = newUserData['firstname']
            lastName = newUserData['lastname']
            password = newUserData['password']
            phone = newUserData['mobile']
            email = newUserData['email']
            res = self.db.checkUserData(username, email)
            if res == 0:
                self.db.insertUser(username, firstName, lastName, password, phone, email)
            return res
        # BUG FIX: json.loads raises ValueError on malformed JSON and a
        # missing field raises KeyError; the old 'except ImportError'
        # could never trigger, so errors escaped unhandled.
        except (ValueError, KeyError) as e:
            # print(x) with a single argument behaves the same on py2 and py3.
            print(json.dumps({"status": "error", "APIManager.registerNewClient": str(e)}))
            exit(0)
    def authenticate(self, info):
        """Check a username/password pair and print the DB layer's verdict."""
        try:
            userData = json.loads(info)
            name = userData['username']
            passwordEntered = userData['password']
            res = self.db.authenticate(name, passwordEntered)
            print(res)
        except (ValueError, KeyError) as e:
            print(json.dumps({"status": "error", "APIManager.authenticate": str(e)}))
            exit(0)
    def createFriendship(self, friendData):
        """Create a friendship link between two users.

        NOTE(review): usernames are hard-coded debug values; the
        commented-out lines show the intended extraction from friendData.
        """
        try:
            '''friendUsername = userData['username']
            ownUsername = myUsername'''
            ownUsername = 'alex93'
            friendUsername = 'lyubo93'
            self.db.createFriendship(ownUsername, friendUsername)
        except (ValueError, KeyError) as e:
            print(json.dumps({"status": "error", "APIManager.createFriendship": str(e)}))
            exit(0)
    def sendMessage(self, messageData):
        """Send a message from one user to another.

        NOTE(review): sender/receiver/message are hard-coded debug values;
        the commented-out lines show the intended extraction.
        """
        try:
            '''sender = messageData['sender']
            receiver = messageData['receiver']
            message = messageData['message'] '''
            sender = 'alex93'
            receiver = 'lyubo93'
            message = 'Hi Lyubomir!'
            self.db.sendMessage(sender, receiver, message)
        except (ValueError, KeyError) as e:
            print(json.dumps({"status": "error", "APIManager.sendMessage": str(e)}))
            exit(0)
| {
"content_hash": "3f9e49ff22689e20f07439a8d09be7c2",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 91,
"avg_line_length": 38.21052631578947,
"alnum_prop": 0.5661157024793388,
"repo_name": "lyubomir1993/AlohaServer",
"id": "2b7170de85d09c3428483178dc7f8f3a88d311e1",
"size": "2197",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "APIManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40"
},
{
"name": "HTML",
"bytes": "5144"
},
{
"name": "JavaScript",
"bytes": "2637"
},
{
"name": "Python",
"bytes": "71954"
}
],
"symlink_target": ""
} |
import os
import testinfra.utils.ansible_runner
# Target every host in the Molecule-generated Ansible inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_installed_packages(host):
    """The pinned logstash package version must be installed."""
    pkg = host.package('logstash')
    assert pkg.is_installed
    assert pkg.version == '1:6.6.0-1'
def test_files(host):
    """The default file and every pipeline config must exist."""
    expected_paths = (
        '/etc/default/logstash',
        '/etc/logstash/conf.d/01-inputs.conf',
        '/etc/logstash/conf.d/10-filters.conf',
        '/etc/logstash/conf.d/90-outputs.conf',
    )
    for path in expected_paths:
        assert host.file(path).exists
def test_service(host):
    """The logstash service must be enabled at boot and currently running."""
    service = host.service('logstash')
    assert service.is_enabled
    assert service.is_running
def test_socket(host):
    """Logstash must be listening on its TCP input port."""
    listener = host.socket('tcp://0.0.0.0:5001')
    assert listener.is_listening
| {
"content_hash": "73592468a8133a1f45b71d5ccfdd2912",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 63,
"avg_line_length": 26.096774193548388,
"alnum_prop": 0.6736711990111248,
"repo_name": "sansible/logstash",
"id": "5b921ccceb5620efda89b674d0c086b1015c6dfb",
"size": "809",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "molecule/default/tests/test_default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3072"
},
{
"name": "Python",
"bytes": "2427"
}
],
"symlink_target": ""
} |
from django.db import models, transaction, IntegrityError
from django.utils.timezone import now
from . import app_settings
from . import utils
class PasswordChangeManager(models.Manager):
    """Manager tracking whether a user must change their password."""

    def get_or_create_for_user(self, user):
        """Fetch (or lazily create) the password-change record for *user*."""
        return self.get_or_create(user=user)

    def is_required_for_user(self, user):
        """Return True when *user* is currently required to change password."""
        record, _created = self.get_or_create_for_user(user=user)
        return record.required
class SessionManager(models.Manager):
    """Manager for per-login session records tied to Django sessions."""
    def active(self, user=None):
        # Non-expired sessions, newest activity first; optionally scoped
        # to a single user.
        qs = self.filter(expiration_date__gt=now())
        if user is not None:
            qs = qs.filter(user=user)
        return qs.order_by('-last_activity')
    def create_session(self, request, user):
        # Derive the client fingerprint via the configurable resolvers.
        ip = utils.resolve(app_settings.IP_RESOLVER, request)
        device = utils.resolve(app_settings.DEVICE_RESOLVER, request)
        location = utils.resolve(app_settings.LOCATION_RESOLVER, request)
        user_agent = request.META.get('HTTP_USER_AGENT', '')
        # Truncate to 200 chars -- presumably the model field's max_length;
        # TODO confirm against the Session model definition.
        user_agent = user_agent[:200] if user_agent else user_agent
        try:
            # atomic() so a failed INSERT doesn't poison the surrounding
            # transaction before we fall back to fetching the existing row.
            with transaction.atomic():
                obj = self.create(
                    user=user,
                    session_key=request.session.session_key,
                    ip=ip,
                    user_agent=user_agent,
                    device=device,
                    location=location,
                    expiration_date=request.session.get_expiry_date(),
                    last_activity=now())
        except IntegrityError:
            # A record for this (user, session_key) already exists:
            # just refresh its last-activity timestamp.
            obj = self.get(
                user=user,
                session_key=request.session.session_key)
            obj.last_activity = now()
            obj.save()
        return obj
| {
"content_hash": "8e9f61c1acb0be37999e9aed9e3a7630",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 73,
"avg_line_length": 33.84,
"alnum_prop": 0.5862884160756501,
"repo_name": "ulule/django-safety",
"id": "6e6f0d2f60e44ab1df98b6d4692763a687369516",
"size": "1716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "safety/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5783"
},
{
"name": "Makefile",
"bytes": "1230"
},
{
"name": "Python",
"bytes": "37127"
}
],
"symlink_target": ""
} |
"""Replaces the template markup (PHP, Jinja, Mako) from an HTML file.
It replaces the markup so the lines and positions of actual HTML content is
preserved.
It uses Regexes to do the replacements so it is prone to errors in some corner
cases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from io import StringIO
import re
# Common patterns
LEADING_SPACES = r'(?P<spaces>^[\t ]+)'
NEW_LINE = r'(?P<newline>\n\r|\n|\r\n)'
# PHP patterns
PHP_START_ECHO = r'<\?='
PHP_START_TAG_WITH_ECHO = r'<\?php\s+echo'
PHP_START_TAG_SHORT_WITH_ECHO = r'<\?\s+echo'
PHP_START_TAG = r'<\?php'
# N.B.(skreft): there's no need to use a negative lookahead here [(?!=)], as the
# priority of start tags is lower than the one of echo tags.
PHP_START_TAG_SHORT = r'<\?'
PHP_END_TAG = r'\?>'
# Jinja patterns
JINJA_START_TAG = r'{%'
JINJA_START_ECHO = r'{{'
JINJA_END_TAG = r'%}'
JINJA_END_ECHO = r'}}'
# Mako patterns
MAKO_START_ECHO = r'\${'
MAKO_END_ECHO = r'}'
MAKO_START_TAG = (
    r'(</?%|%\s*(end)?(if|else|elif|for|while|try|catch|except|finally)|##)')
MAKO_END_TAG = r'([%/]?>|$)'


def get_pattern(echo_tags, start_tags, end_tags):
    """Build the combined regex used by clean() to strip template markup.

    Each tag category is wrapped in a named group ('spaces', 'newline',
    'echo', 'start', 'end') so callers can tell which kind matched.
    Alternatives are tried left to right, so echo tags take priority over
    start tags, which take priority over end tags; a pattern may therefore
    need a negative lookahead to avoid being shadowed by a higher-priority
    category.

    Args:
        echo_tags: list of re strings for tags that may produce output.
        start_tags: list of re strings for non-printing tags, such as
            control structures.
        end_tags: list of re strings for all the closing tags.

    Returns:
        a compiled RegexObject with the re.MULTILINE flag set.
    """
    echo_alt = r'(?P<echo>%s)' % '|'.join(echo_tags)
    start_alt = r'(?P<start>%s)' % '|'.join(start_tags)
    end_alt = r'(?P<end>%s)' % '|'.join(end_tags)
    alternatives = (LEADING_SPACES, NEW_LINE, echo_alt, start_alt, end_alt)
    return re.compile('|'.join(alternatives), flags=re.MULTILINE)
# Pre-built patterns for each supported markup language, in the argument
# order expected by get_pattern(): echo tags, start tags, end tags.
JINJA_PATTERN = get_pattern(
    [JINJA_START_ECHO],
    [JINJA_START_TAG],
    [JINJA_END_TAG, JINJA_END_ECHO])
PHP_PATTERN = get_pattern(
    [PHP_START_ECHO, PHP_START_TAG_WITH_ECHO, PHP_START_TAG_SHORT_WITH_ECHO],
    [PHP_START_TAG, PHP_START_TAG_SHORT],
    [PHP_END_TAG])
MAKO_PATTERN = get_pattern(
    [MAKO_START_ECHO],
    [MAKO_START_TAG],
    [MAKO_END_TAG, MAKO_END_ECHO])
# Catch-all pattern stripping PHP, Jinja and Mako markup in a single pass;
# this is the default used by clean().
ALL_PATTERN = get_pattern(
    [PHP_START_ECHO, PHP_START_TAG_WITH_ECHO, PHP_START_TAG_SHORT_WITH_ECHO,
     JINJA_START_ECHO, MAKO_START_ECHO],
    [PHP_START_TAG, PHP_START_TAG_SHORT, JINJA_START_TAG, MAKO_START_TAG],
    [PHP_END_TAG, JINJA_END_TAG, JINJA_END_ECHO, MAKO_END_TAG, MAKO_END_ECHO])
def _get_tag(match):
groups = match.groupdict()
if groups.get('echo') is not None:
return 'ECHO'
elif groups.get('end') is not None:
return 'END'
elif groups.get('start') is not None:
return 'START'
elif groups.get('newline') is not None:
return 'NEWLINE'
elif groups.get('spaces') is not None:
return 'SPACES'
print(groups)
assert False, ('Only the groups "echo", "end", "start", "newline" and ' +
'"spaces" are allowed. Please correct your pattern or use ' +
'the method get_pattern() to construct it.')
class _TemplateRemover(object):
    """Helper class for the clean() method.
    This class exists mainly to factor out some methods.
    """
    def __init__(self, html_content, pattern=ALL_PATTERN):
        # Raw input to be cleaned.
        self.html_content = html_content
        # Combined regex built by get_pattern().
        self.pattern = pattern
        # Cleaned output accumulated so far.
        self._output = StringIO()
        # Position in html_content up to which output has been produced.
        self._index = 0
        # Parser state: 'HTML', 'BLANK_TEMPLATE' or 'ECHO_TEMPLATE'.
        self._state = 'HTML'
        # Chunks (leading spaces / blanked-out tags) withheld until we know
        # whether the current line has real content or only markup.
        self._pending = []
        # True when self._pending contains fill from a blanked-out tag.
        self._pending_has_blank = False
    def _reset_pending(self):
        # Drop any withheld chunks and clear the blank marker.
        self._pending = []
        self._pending_has_blank = False
    def _write_content(self, end=None):
        # Flush withheld chunks, then copy the raw input from the last
        # emitted position up to `end` (or to the end of the input).
        self._output.writelines(self._pending)
        self._reset_pending()
        self._output.write(self.html_content[self._index:end])
    def get_clean_content(self):
        """Implementation of the clean() method."""
        # Replacement characters per open-tag state: start tags become
        # blanks, echo tags become zeroes (see clean() for the rationale).
        fill_chars = {'BLANK_TEMPLATE': ' ', 'ECHO_TEMPLATE': '0'}
        for match in self.pattern.finditer(self.html_content):
            start, end = match.start(), match.end()
            tag = _get_tag(match)
            if tag == 'ECHO':
                # Emit everything before the tag; the zero-fill itself is
                # written only when the matching END arrives.
                self._write_content(start)
                self._index = start
                self._state = 'ECHO_TEMPLATE'
            elif tag == 'START':
                if self._index != start:
                    self._write_content(start)
                    self._index = start
                self._state = 'BLANK_TEMPLATE'
            elif tag == 'END':
                if self._state not in ('BLANK_TEMPLATE', 'ECHO_TEMPLATE'):
                    # We got a closing tag but none was open. We decide to carry
                    # on as it may be the case that it was because of a closing
                    # dictionary in javascript like: var dict = {foo:{}}.
                    # See the note on the clean() function for more details.
                    continue
                fill_char = fill_chars[self._state]
                # Fill covers the whole tag, from its opening to this END.
                fill = fill_char * (end - self._index)
                if self._state == 'BLANK_TEMPLATE':
                    # Blank fill is withheld: it is only emitted if the line
                    # turns out to contain real content as well.
                    self._pending.append(fill)
                    self._pending_has_blank = True
                else:
                    # Echo fill always counts as real content.
                    assert not self._pending
                    self._output.write(fill)
                self._index = end
                self._state = 'HTML'
            elif tag == 'SPACES':
                # Leading indentation is withheld until we know the line
                # has content that should keep it.
                self._pending.append(match.group('spaces'))
                self._index = end
            elif tag == 'NEWLINE':
                if self._state == 'HTML':
                    # Emit the line unless it consisted solely of blanked
                    # markup; the newline itself always survives so line
                    # numbers are preserved.
                    if self._index != start or not self._pending_has_blank:
                        self._write_content(start)
                    self._output.write(match.group('newline'))
                elif self._state == 'BLANK_TEMPLATE':
                    # We discard the content of this template and whatever is in
                    # self._pending.
                    self._output.write(match.group('newline'))
                elif self._state == 'ECHO_TEMPLATE':
                    assert False, 'Echo tags should be in just one line.'
                self._index = end
                self._reset_pending()
        assert self._state == 'HTML', 'Tag was not closed'
        # Flush the tail unless it is nothing but blanked-out markup.
        if self._index != len(self.html_content) or not self._pending_has_blank:
            self._write_content()
        return self._output.getvalue()
def clean(html_content, pattern=ALL_PATTERN):
    """Strip template markup from html_content, preserving its layout.

    This is a regex-based remover, not a fully compliant parser, so some
    contrived inputs cannot be handled.  For example ``<?php echo "?>" ?>``
    fails because the first ``?>`` -- even though it sits inside a string
    literal -- is taken as the closing tag.  Inputs like these tend to
    confuse other development tooling as well and should be avoided.

    Matching is driven by three tag categories:

    echo_tags: tags that typically produce output.  Each is replaced by a
        run of '0' characters of the same length, so the lines and columns
        of the surrounding HTML are preserved ('0' is chosen because it is
        a valid number and some HTML attributes require numbers).
        Strong assumption: an echo tag opens and closes on a single line.
        For example, href="<?= $foo ?>" is replaced with href="00000000000".

    start_tags: tags that usually print nothing, mainly control structures.
        A tag opened and closed on one line becomes the same number of
        blanks, but only if non-blank content precedes or follows it on
        that line; otherwise (including multiline tags) its content is
        simply removed, leaving no trailing spaces.
        For example:
            "  <?php if { ?>" is replaced by ""
        but
            "  <?php if { ?> <!-- A comment ->" is replaced by
            "  <!-- A comment ->"

    end_tags: the closing tags for both echo and start tags; they trigger
        the output.

    Args:
      html_content: the string to be cleaned.
      pattern: a RegexObject containing only the groups "echo", "end",
        "start", "newline" and "spaces" (build it with get_pattern()).
        By default all supported markups (PHP, Jinja, Mako) are cleaned.

    Returns:
      A string with the markup removed.
    """
    remover = _TemplateRemover(html_content, pattern)
    return remover.get_clean_content()
def clean_php(html_content):
    """Strip PHP markup from the supplied string.

    The optional ASP-style tags are not supported.  See clean() for the
    full semantics.
    """
    return clean(html_content, PHP_PATTERN)
def clean_jinja(html_content):
    """Strip Jinja markup from the supplied string; see clean() for details."""
    return clean(html_content, JINJA_PATTERN)
def clean_mako(html_content):
    """Strip Mako markup from the supplied string; see clean() for details."""
    return clean(html_content, MAKO_PATTERN)
| {
"content_hash": "8aa4b90b681836194f99b63a76ff64a4",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 80,
"avg_line_length": 36.67407407407408,
"alnum_prop": 0.6047263179155726,
"repo_name": "deezer/template-remover",
"id": "a8a60cdd15dc4f5318b50f54105a89a7f19a5c28",
"size": "10497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "template_remover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19644"
}
],
"symlink_target": ""
} |
# Renders a poster of the flags of the Spanish autonomous communities using
# the pudzu chart helpers (third-party; also provides pd/Image/calibri here).
from pudzu.charts import *
df = pd.read_csv("datasets/flagsspain.csv")
# Lay the records out as a 5-wide grid of dicts for grid_chart.
array = [[dict(r) for _,r in rows] for rows in generate_batches(df.iterrows(), 5)]
data = pd.DataFrame(array)
FONT = calibri
fg, bg="black", "#EEEEEE"
# Fallback image used when a record carries no flag URL.
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
def process(d):
    """Render one grid cell: name, optional description and the flag image."""
    if not d: return None
    # NOTE(review): get_non presumably returns d[key] (or the default) while
    # treating missing/NaN as absent -- from pudzu; TODO confirm.
    description = get_non(d, 'description')
    description = "({})".format(description) if description else " "
    flag = Image.from_url_with_cache(get_non(d, 'flag', default_img)).to_rgba()
    # Narrow flags keep their aspect at a fixed height; wide ones are
    # squashed to a uniform 318x198 box.
    flag = flag.resize_fixed_aspect(height=198) if flag.width / flag.height < 1.3 else flag.resize((318,198))
    flag = flag.pad(1, "grey")
    return Image.from_column([
      Image.from_text(d['name'], FONT(32, bold=True), beard_line=True, fg=fg),
      Image.from_text(description, FONT(24, italics=True), fg=fg),
      flag
      ], padding=2, bg=bg, equal_widths=True)
title = Image.from_text("Flags of the Spanish Autonomies".upper(), FONT(80, bold=True), fg=fg, bg=bg).pad(40, bg)
grid = grid_chart(data, process, padding=(10,20), fg=fg, bg=bg, yalign=(0.5,0.5,0.5))
# Stack title + grid + a 40px bottom margin, add the author watermark in the
# corner, and save the final poster.
img = Image.from_column([title, grid, Rectangle((0,40))], bg=bg)
img.place(Image.from_text("/u/Udzu", FONT(24), fg=fg, bg=bg, padding=5).pad((1,1,0,0), fg), align=1, padding=5, copy=False)
img.save("output/flagsspain.png")
| {
"content_hash": "254311202f03a33f1aacb21b1b16779e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 123,
"avg_line_length": 47.03448275862069,
"alnum_prop": 0.6686217008797654,
"repo_name": "Udzu/pudzu",
"id": "c1da9d5c088780be9d8e2c2519790c93714b2e3c",
"size": "1364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataviz/flagsspain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7945"
},
{
"name": "Python",
"bytes": "867429"
},
{
"name": "Roff",
"bytes": "3702309"
}
],
"symlink_target": ""
} |
import argparse
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
from hooks import install
from py_utils import binary_manager
from py_utils import dependency_util
from py_utils import xvfb
# Path to dependency manager config containing chrome binary data.
CHROME_BINARIES_CONFIG = dependency_util.ChromeBinariesConfigPath()
# Code-search link recorded in the recipe presentation output (see Main).
CHROME_CONFIG_URL = (
    'https://code.google.com/p/chromium/codesearch#chromium/src/third_party/'
    'catapult/py_utils/py_utils/chrome_binaries.json')
# Default port to run on if not auto-assigning from OS
DEFAULT_PORT = '8111'
# Mapping of sys.platform -> platform-specific names and paths.
PLATFORM_MAPPING = {
    'linux2': {
        'omaha': 'linux',
        'prefix': 'Linux_x64',
        'zip_prefix': 'linux',
        'chromepath': 'chrome-linux/chrome'
    },
    'win32': {
        'omaha': 'win',
        'prefix': 'Win',
        'zip_prefix': 'win32',
        'chromepath': 'chrome-win32\\chrome.exe',
    },
    'darwin': {
        'omaha': 'mac',
        'prefix': 'Mac',
        'zip_prefix': 'mac',
        'chromepath': ('chrome-mac/Chromium.app/Contents/MacOS/Chromium'),
        'version_path': 'chrome-mac/Chromium.app/Contents/Versions/',
        'additional_paths': [
            # NOTE(review): '%VERSION%' looks like a placeholder substituted
            # elsewhere; no substitution happens in this file. TODO confirm.
            ('chrome-mac/Chromium.app/Contents/Versions/%VERSION%/'
             'Chromium Helper.app/Contents/MacOS/Chromium Helper'),
        ],
    },
}
def IsDepotToolsPath(path):
  """Returns True when `path` contains the depot_tools `gclient` script."""
  gclient_script = os.path.join(path, 'gclient')
  return os.path.isfile(gclient_script)
def FindDepotTools():
  """Locates a depot_tools checkout via PYTHONPATH, then $PATH.

  Returns the directory path, or None when no checkout is found.
  """
  # Prefer an entry already on PYTHONPATH whose directory name is
  # depot_tools and which actually contains the gclient script.
  for entry in sys.path:
    looks_like_depot_tools = entry.rstrip(os.sep).endswith('depot_tools')
    if looks_like_depot_tools and IsDepotToolsPath(entry):
      return entry
  # Otherwise scan the executable search path.
  for entry in os.environ['PATH'].split(os.pathsep):
    if IsDepotToolsPath(entry):
      return entry.rstrip(os.sep)
  return None
def GetLocalChromePath(path_from_command_line):
  """Returns a usable local Chrome binary path/command for this platform.

  Args:
    path_from_command_line: explicit path supplied by the user; returned
        unchanged when truthy.

  Returns:
    A file path (or the bare 'google-chrome' command on Linux), or None
    when no local Chrome could be found.
  """
  if path_from_command_line:
    return path_from_command_line

  if sys.platform == 'darwin':  # Mac
    chrome_path = (
        '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
    if os.path.isfile(chrome_path):
      return chrome_path
  elif sys.platform.startswith('linux'):
    # `google-chrome` is expected on $PATH; probe it quietly.
    found = False
    try:
      with open(os.devnull, 'w') as devnull:
        found = subprocess.call(['google-chrome', '--version'],
                                stdout=devnull, stderr=devnull) == 0
    except OSError:
      pass
    if found:
      return 'google-chrome'
  elif sys.platform == 'win32':
    search_paths = [os.getenv('PROGRAMFILES(X86)'),
                    os.getenv('PROGRAMFILES'),
                    os.getenv('LOCALAPPDATA')]
    chrome_path = os.path.join('Google', 'Chrome', 'Application', 'chrome.exe')
    for search_path in search_paths:
      if not search_path:
        # The env var is unset (e.g. PROGRAMFILES(X86) on 32-bit Windows);
        # os.path.join(None, ...) would raise TypeError.
        continue
      test_path = os.path.join(search_path, chrome_path)
      if os.path.isfile(test_path):
        return test_path
  return None
def Main(argv):
  """Runs the dev_server test suite against a local or downloaded Chrome.

  Starts run_dev_server, points a Chrome instance at the test page, waits
  for the server process to exit, and propagates its return code via
  sys.exit().  (Python 2 source: uses print statements.)
  """
  try:
    parser = argparse.ArgumentParser(
        description='Run dev_server tests for a project.')
    parser.add_argument('--chrome_path', type=str,
                        help='Path to Chrome browser binary.')
    parser.add_argument('--no-use-local-chrome',
                        dest='use_local_chrome', action='store_false')
    parser.add_argument(
        '--no-install-hooks', dest='install_hooks', action='store_false')
    parser.add_argument('--tests', type=str,
                        help='Set of tests to run (tracing or perf_insights)')
    parser.add_argument('--channel', type=str, default='stable',
                        help='Chrome channel to run (stable or canary)')
    parser.add_argument('--presentation-json', type=str,
                        help='Recipe presentation-json output file path')
    parser.set_defaults(install_hooks=True)
    parser.set_defaults(use_local_chrome=True)
    args = parser.parse_args(argv[1:])
    if args.install_hooks:
      install.InstallHooks()
    # Fresh profile directory so the test Chrome starts from a clean state.
    user_data_dir = tempfile.mkdtemp()
    tmpdir = None
    xvfb_process = None
    server_path = os.path.join(os.path.dirname(
        os.path.abspath(__file__)), os.pardir, 'bin', 'run_dev_server')
    # TODO(anniesullie): Make OS selection of port work on Windows. See #1235.
    if sys.platform == 'win32':
      port = DEFAULT_PORT
    else:
      # Port 0 lets the OS pick a free port; the real one is parsed from
      # the server's stderr below.
      port = '0'
    server_command = [server_path, '--no-install-hooks', '--port', port]
    if sys.platform.startswith('win'):
      server_command = ['python.exe'] + server_command
    print "Starting dev_server..."
    server_process = subprocess.Popen(
        server_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        bufsize=1)
    time.sleep(1)
    if sys.platform != 'win32':
      # Recover the auto-assigned port from the server's startup banner.
      output = server_process.stderr.readline()
      port = re.search(
          r'Now running on http://127.0.0.1:([\d]+)', output).group(1)
    chrome_info = None
    if args.use_local_chrome:
      chrome_path = GetLocalChromePath(args.chrome_path)
      if not chrome_path:
        logging.error('Could not find path to chrome.')
        sys.exit(1)
      chrome_info = 'with command `%s`' % chrome_path
    else:
      # Download a Chrome binary for the requested channel instead.
      channel = args.channel
      # Linux has no canary channel; dev is the closest equivalent.
      if sys.platform == 'linux2' and channel == 'canary':
        channel = 'dev'
      assert channel in ['stable', 'beta', 'dev', 'canary']
      print 'Fetching the %s chrome binary via the binary_manager.' % channel
      chrome_manager = binary_manager.BinaryManager([CHROME_BINARIES_CONFIG])
      arch, os_name = dependency_util.GetOSAndArchForCurrentDesktopPlatform()
      chrome_path, version = chrome_manager.FetchPathWithVersion(
          'chrome_%s' % channel, arch, os_name)
      print 'Finished fetching the chrome binary to %s' % chrome_path
      # Headless environments need a virtual display for Chrome.
      if xvfb.ShouldStartXvfb():
        print 'Starting xvfb...'
        xvfb_process = xvfb.StartXvfb()
      chrome_info = 'version %s from channel %s' % (version, channel)
    chrome_command = [
        chrome_path,
        '--user-data-dir=%s' % user_data_dir,
        '--no-sandbox',
        '--no-experiments',
        '--no-first-run',
        '--noerrdialogs',
        '--window-size=1280,1024',
        ('http://localhost:%s/%s/tests.html?' % (port, args.tests)) +
        'headless=true&testTypeToRun=all',
    ]
    print "Starting Chrome %s..." % chrome_info
    chrome_process = subprocess.Popen(
        chrome_command, stdout=sys.stdout, stderr=sys.stderr)
    print 'chrome process command: %s' % ' '.join(chrome_command)
    print "Waiting for tests to finish..."
    # The dev server exits when the test run completes; its output carries
    # the results.
    server_out, server_err = server_process.communicate()
    print "Killing Chrome..."
    if sys.platform == 'win32':
      # Use taskkill on Windows to make sure Chrome and all subprocesses are
      # killed.
      subprocess.call(['taskkill', '/F', '/T', '/PID', str(chrome_process.pid)])
    else:
      chrome_process.kill()
    if server_process.returncode != 0:
      logging.error('Tests failed!')
      logging.error('Server stdout:\n%s', server_out)
      logging.error('Server stderr:\n%s', server_err)
    else:
      print server_out
    if args.presentation_json:
      # NOTE(review): `version` is only assigned in the fetched-chrome branch
      # above, so with --use-local-chrome (the default) this block raises
      # NameError. TODO confirm intended usage.
      with open(args.presentation_json, 'w') as recipe_out:
        # Add a link to the buildbot status for the step saying which version
        # of Chrome the test ran on. The actual linking feature is not used,
        # but there isn't a way to just add text.
        link_name = 'Chrome Version %s' % version
        presentation_info = {'links': {link_name: CHROME_CONFIG_URL}}
        json.dump(presentation_info, recipe_out)
  finally:
    # Wait for Chrome to be killed before deleting temp Chrome dir. Only have
    # this timing issue on Windows.
    if sys.platform == 'win32':
      time.sleep(5)
    # NOTE(review): tmpdir is never assigned anything but None in this
    # function, so this cleanup (including user_data_dir removal) never
    # actually runs. TODO confirm.
    if tmpdir:
      try:
        shutil.rmtree(tmpdir)
        shutil.rmtree(user_data_dir)
      except OSError as e:
        logging.error('Error cleaning up temp dirs %s and %s: %s',
                      tmpdir, user_data_dir, e)
    if xvfb_process:
      xvfb_process.kill()
  sys.exit(server_process.returncode)
| {
"content_hash": "01940883346d7868c9c3320f22745bd4",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 35.122270742358076,
"alnum_prop": 0.6273778440880269,
"repo_name": "catapult-project/catapult-csm",
"id": "596f34582199d3e03d8a39612e286e93a2814ac7",
"size": "8228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catapult_build/run_dev_server_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
} |
# Packaging script for mbutil (an MBTiles importer/exporter).
# NOTE(review): distutils is deprecated and removed from the stdlib in
# Python 3.12; migrating to setuptools may be needed for newer Pythons.
from distutils.core import setup
setup(
    name='mbutil',
    version='0.0.2',
    author='Tom MacWright',
    author_email='macwright@gmail.com',
    packages=['mbutil'],
    # Installs the mb-util command-line entry script.
    scripts=['mb-util'],
    url='https://github.com/mapbox/mbutil',
    license='LICENSE.md',
    description='An importer and exporter for MBTiles',
    long_description=open('README.md').read(),
)
| {
"content_hash": "5c895c7c52cba334b825b5489932c000",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 55,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.6524064171122995,
"repo_name": "rouault/mbutil",
"id": "f6d347e19f19d76ea1549976f86745010272d95a",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12452"
}
],
"symlink_target": ""
} |
import unittest
from dart.client.python.dart_client import Dart
from dart.engine.no_op.metadata import NoOpActionTypes
from dart.model.action import ActionData, Action, ActionState
from dart.model.datastore import Datastore, DatastoreData, DatastoreState
from dart.model.trigger import Trigger, TriggerState
from dart.model.trigger import TriggerData
from dart.model.workflow import WorkflowData, WorkflowState, WorkflowInstanceState
from dart.model.workflow import Workflow
class TestSuperTriggerWorkflowChaining(unittest.TestCase):
    """End-to-end test of chaining two workflows via a super trigger.

    Requires a dart API server listening on localhost:5000 with the no-op
    engine available; everything created in setUp is removed in tearDown.
    """
    def setUp(self):
        """Creates two datastore/workflow pairs and wires the two triggers."""
        dart = Dart(host='localhost', port=5000)
        """ :type dart: dart.client.python.dart_client.Dart """
        self.dart = dart
        # Two template datastores on the no-op engine with no action delay.
        dst_args = {'action_sleep_time_in_seconds': 0}
        dst0 = Datastore(data=DatastoreData('test-datastore0', 'no_op_engine', args=dst_args, state=DatastoreState.TEMPLATE))
        self.datastore0 = self.dart.save_datastore(dst0)
        dst1 = Datastore(data=DatastoreData('test-datastore1', 'no_op_engine', args=dst_args, state=DatastoreState.TEMPLATE))
        self.datastore1 = self.dart.save_datastore(dst1)
        # One active workflow per datastore.
        wf0 = Workflow(data=WorkflowData('test-workflow0', self.datastore0.id, state=WorkflowState.ACTIVE))
        self.workflow0 = self.dart.save_workflow(wf0, self.datastore0.id)
        wf1 = Workflow(data=WorkflowData('test-workflow1', self.datastore1.id, state=WorkflowState.ACTIVE))
        self.workflow1 = self.dart.save_workflow(wf1, self.datastore1.id)
        # Two always-succeeding template actions per workflow.
        a00 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE))
        a01 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE))
        self.action00, self.action01 = self.dart.save_actions([a00, a01], workflow_id=self.workflow0.id)
        a10 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE))
        a11 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE))
        self.action10, self.action11 = self.dart.save_actions([a10, a11], workflow_id=self.workflow1.id)
        # This trigger fires when workflow0 completes...
        tr_args = {'completed_workflow_id': self.workflow0.id}
        tr = Trigger(data=TriggerData('test-trigger', 'workflow_completion', None, tr_args, TriggerState.ACTIVE))
        self.trigger = self.dart.save_trigger(tr)
        # ...and the super trigger starts workflow1 once it has fired.
        st_args = {'fire_after': 'ALL', 'completed_trigger_ids': [self.trigger.id]}
        st = Trigger(data=TriggerData('test-super-trigger', 'super', [self.workflow1.id], st_args, TriggerState.ACTIVE))
        self.super_trigger = self.dart.save_trigger(st)
    def tearDown(self):
        """Deletes all entities created by setUp (and any run artifacts)."""
        for a in self.dart.get_actions(workflow_id=self.workflow0.id):
            self.dart.delete_action(a.id)
        for a in self.dart.get_actions(workflow_id=self.workflow1.id):
            self.dart.delete_action(a.id)
        # Each workflow instance gets its own datastore; remove those too.
        for wfi in self.dart.get_workflow_instances(self.workflow0.id):
            self.dart.delete_datastore(wfi.data.datastore_id)
        for wfi in self.dart.get_workflow_instances(self.workflow1.id):
            self.dart.delete_datastore(wfi.data.datastore_id)
        self.dart.delete_trigger(self.super_trigger.id)
        self.dart.delete_trigger(self.trigger.id)
        self.dart.delete_workflow_instances(self.workflow0.id)
        self.dart.delete_workflow_instances(self.workflow1.id)
        self.dart.delete_workflow(self.workflow0.id)
        self.dart.delete_workflow(self.workflow1.id)
        self.dart.delete_datastore(self.datastore0.id)
        self.dart.delete_datastore(self.datastore1.id)
    def test_super_trigger_workflow_chaining(self):
        """Manually firing workflow0 should eventually run workflow1 too."""
        self.dart.manually_trigger_workflow(self.workflow0.id)
        # Blocks until workflow1 (started via the super trigger) finishes.
        wf_instances = self.dart.await_workflow_completion(self.workflow1.id)
        for wfi in wf_instances:
            self.assertEqual(wfi.data.state, WorkflowInstanceState.COMPLETED)
# Allows running this integration suite directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "e8b562da1393960afd50e8c5a0f523f0",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 151,
"avg_line_length": 57.138888888888886,
"alnum_prop": 0.7207097715119105,
"repo_name": "RetailMeNotSandbox/dart",
"id": "35a07d6f246a7857d082763c7efc434122104575",
"size": "4114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/dart/test/full/test_super_trigger_workflow_chaining.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103727"
},
{
"name": "HTML",
"bytes": "67636"
},
{
"name": "JavaScript",
"bytes": "2762304"
},
{
"name": "Nginx",
"bytes": "996"
},
{
"name": "PLpgSQL",
"bytes": "1475"
},
{
"name": "Python",
"bytes": "1025954"
},
{
"name": "Ruby",
"bytes": "5523"
},
{
"name": "Shell",
"bytes": "3100"
}
],
"symlink_target": ""
} |
import random

# Secret number the player has to guess.
rand_num = random.randint(1, 10)
# Numbers the player has tried so far.
guessed_nums = []
# Maximum number of attempts the player gets.
allowed_guesses = 5
| {
"content_hash": "60d6e68e9d909967c32cff202ce17b3a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.4,
"alnum_prop": 0.7011494252873564,
"repo_name": "Zenira/Treehouse",
"id": "b60ea72a2424467030000900f50d28d3c0ad7223",
"size": "87",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "number_game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87"
}
],
"symlink_target": ""
} |
"""
oauthlib.openid.connect.core.endpoints.pre_configured
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various endpoints needed
for providing OpenID Connect servers.
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.oauth2.rfc6749.endpoints import (
AuthorizationEndpoint,
IntrospectEndpoint,
ResourceEndpoint,
RevocationEndpoint,
TokenEndpoint
)
from oauthlib.oauth2.rfc6749.grant_types import (
AuthorizationCodeGrant as OAuth2AuthorizationCodeGrant,
ImplicitGrant as OAuth2ImplicitGrant,
ClientCredentialsGrant,
RefreshTokenGrant,
ResourceOwnerPasswordCredentialsGrant
)
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from ..grant_types import (
AuthorizationCodeGrant,
ImplicitGrant,
HybridGrant,
)
from ..grant_types.dispatchers import (
AuthorizationCodeGrantDispatcher,
ImplicitTokenGrantDispatcher,
AuthorizationTokenGrantDispatcher
)
from ..tokens import JWTToken
from .userinfo import UserInfoEndpoint
class Server(AuthorizationEndpoint, IntrospectEndpoint, TokenEndpoint,
             ResourceEndpoint, RevocationEndpoint, UserInfoEndpoint):
    """An all-in-one endpoint featuring all four major grant types."""
    def __init__(self, request_validator, token_expires_in=None,
                 token_generator=None, refresh_token_generator=None,
                 *args, **kwargs):
        """Construct a new all-grants-in-one server.
        :param request_validator: An implementation of
                                  oauthlib.oauth2.RequestValidator.
        :param token_expires_in: An int or a function to generate a token
                                 expiration offset (in seconds) given a
                                 oauthlib.common.Request object.
        :param token_generator: A function to generate a token from a request.
        :param refresh_token_generator: A function to generate a token from a
                                        request for the refresh token.
        :param kwargs: Extra parameters to pass to authorization-,
                       token-, resource-, and revocation-endpoint constructors.
        """
        # Plain OAuth 2.0 grants.
        oauth2_code = OAuth2AuthorizationCodeGrant(request_validator)
        oauth2_implicit = OAuth2ImplicitGrant(request_validator)
        password = ResourceOwnerPasswordCredentialsGrant(request_validator)
        credentials = ClientCredentialsGrant(request_validator)
        refresh = RefreshTokenGrant(request_validator)
        # OpenID Connect counterparts of the redirect-based flows.
        oidc_code = AuthorizationCodeGrant(request_validator)
        oidc_implicit = ImplicitGrant(request_validator)
        oidc_hybrid = HybridGrant(request_validator)
        # Token serializers shared by all grant types.
        bearer = BearerToken(request_validator, token_generator,
                             token_expires_in, refresh_token_generator)
        jwt = JWTToken(request_validator, token_generator,
                       token_expires_in, refresh_token_generator)
        # Dispatchers route each request to either the plain OAuth2 grant or
        # its OpenID Connect variant.
        code_dispatcher = AuthorizationCodeGrantDispatcher(
            default_grant=oauth2_code, oidc_grant=oidc_code)
        implicit_dispatcher = ImplicitTokenGrantDispatcher(
            default_grant=oauth2_implicit, oidc_grant=oidc_implicit)
        # See http://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#Combinations for valid combinations
        # internally our AuthorizationEndpoint will ensure they can appear in any order for any valid combination
        AuthorizationEndpoint.__init__(
            self, default_response_type='code',
            response_types={
                'code': code_dispatcher,
                'token': implicit_dispatcher,
                'id_token': oidc_implicit,
                'id_token token': oidc_implicit,
                'code token': oidc_hybrid,
                'code id_token': oidc_hybrid,
                'code id_token token': oidc_hybrid,
                'none': oauth2_code,
            },
            default_token_type=bearer)
        token_dispatcher = AuthorizationTokenGrantDispatcher(
            request_validator, default_grant=oauth2_code,
            oidc_grant=oidc_code)
        TokenEndpoint.__init__(
            self, default_grant_type='authorization_code',
            grant_types={
                'authorization_code': token_dispatcher,
                'password': password,
                'client_credentials': credentials,
                'refresh_token': refresh,
            },
            default_token_type=bearer)
        ResourceEndpoint.__init__(self, default_token='Bearer',
                                  token_types={'Bearer': bearer, 'JWT': jwt})
        RevocationEndpoint.__init__(self, request_validator)
        IntrospectEndpoint.__init__(self, request_validator)
        UserInfoEndpoint.__init__(self, request_validator)
| {
"content_hash": "40a6fe01ccb804760bcede24d6bcf0b8",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 139,
"avg_line_length": 49.5,
"alnum_prop": 0.6077441077441077,
"repo_name": "javier-ruiz-b/docker-rasppi-images",
"id": "fde2739e851a25d4ba8bce435018c00278b1b20c",
"size": "5370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "raspberry-google-home/env/lib/python3.7/site-packages/oauthlib/openid/connect/core/endpoints/pre_configured.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "15254"
},
{
"name": "PHP",
"bytes": "1132"
},
{
"name": "Shell",
"bytes": "17522"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django import forms
from ref.models.instances import ComponentInstance, Environment
from ref.creation import duplicate_envt
from django.forms.formsets import formset_factory
from django.contrib.auth.decorators import permission_required
from django.db.transaction import atomic
def envt_duplicate(request, envt_name):
    # NOTE(review): the new environment name and the relation remap are
    # hard-coded ("new_name", {}) -- presumably a shortcut/debug entry
    # point; the interactive flow is envt_duplicate_name. TODO confirm.
    e = duplicate_envt(envt_name, "new_name", {})
    # Land the user on the admin change page of the freshly created copy.
    return redirect('admin:ref_environment_change', e.id)
@permission_required('ref.scm_addenvironment')
@atomic
def envt_duplicate_name(request, envt_name):
    """Interactive environment duplication view.

    GET renders the duplication form (new name, instances to copy, external
    relation remaps); POST performs the copy and redirects to the admin
    change page of the new environment.
    """
    e = Environment.objects.get(name=envt_name, project=request.project)
    # Formset of remapping rows, one per relation leaving the environment.
    FS = formset_factory(DuplicateFormRelInline, extra=0)
    if request.method == 'POST': # If the form has been submitted...
        form = DuplicateForm(request.POST, envt=e) # A form bound to the POST data
        fs = FS(request.POST, form_kwargs={'project':request.project})
        if form.is_valid() and fs.is_valid(): # All validation rules pass
            # Build {old instance id: new instance id} from the filled rows.
            remaps = {}
            for f in fs.cleaned_data:
                if f['new_target']:
                    remaps[f['old_target'].id] = f['new_target'].id
            e1 = duplicate_envt(envt_name, form.cleaned_data['new_name'], remaps, *ComponentInstance.objects.filter(pk__in=form.cleaned_data['instances_to_copy']))
            return redirect('admin:ref_environment_change', e1.id)
    else:
        form = DuplicateForm(envt=e) # An unbound form
    ## Create a formset for each external relation
    internal_pks = [i.pk for i in e.component_instances.all()]
    # Collect relation targets living outside the environment (dict used as
    # an ordered de-duplicating set).
    ext = {}
    initial_rel = []
    for cpn in e.component_instances.all():
        for rel in cpn.relationships.all():
            if not rel.id in internal_pks:
                ext[rel] = None
    for rel in ext.keys():
        initial_rel .append({'old_target':rel, 'new_target': None})
    # NOTE(review): on an invalid POST this overwrites the bound formset,
    # discarding the user's remap input on re-render. TODO confirm intended.
    fs = FS(initial=initial_rel, form_kwargs={'project':request.project})
    return render(request, 'ref/envt_duplicate.html', {'form': form, 'envt': e, 'fs': fs})
#########################################
## Forms
#########################################
class DuplicateFormRelInline(forms.Form):
    """One formset row remapping an external relation of the copied envt."""
    # The instance currently targeted by the relation.
    old_target = forms.ModelChoiceField(queryset=ComponentInstance.objects.all())
    # Optional replacement target; leaving it empty keeps the original.
    new_target = forms.ModelChoiceField(queryset=ComponentInstance.objects.none(), empty_label='-- Don\'t remap --', required=False)
    def __init__(self, project, *args, **kwargs):
        super(DuplicateFormRelInline, self).__init__(*args, **kwargs)
        # Restrict new_target choices to instances sharing old_target's
        # component description, within the current project.
        if self.is_bound:
            # Bound form: read the submitted old_target pk from the POST data.
            self.fields['new_target'].queryset = ComponentInstance.objects.get(pk=self.data[self.prefix + '-old_target']).description.instance_set.filter(project=project)
        if 'old_target' in self.initial and self.initial['old_target']:
            self.fields['new_target'].queryset = self.initial['old_target'].description.instance_set.filter(project=project)
class DuplicateForm(forms.Form):
    """Main duplication form: new name plus the instances to copy."""
    # Name of the environment copy to create.
    new_name = forms.CharField(max_length=20)
    # Primary keys of the component instances to duplicate.
    instances_to_copy = forms.TypedMultipleChoiceField(choices=(), initial=(), widget=forms.widgets.CheckboxSelectMultiple, coerce=int)
    def __init__(self, *args, **kwargs):
        # `envt` is a required kwarg consumed here so the base Form never
        # sees it.
        self.envt = kwargs['envt']
        del kwargs['envt']
        super(DuplicateForm, self).__init__(*args, **kwargs)
        # Offer every instance of the environment, all pre-selected.
        self.fields['instances_to_copy'].choices = [(i.pk, i.__str__()) for i in self.envt.component_instances.all()]
        self.fields['instances_to_copy'].initial = [i.pk for i in self.envt.component_instances.all()]
| {
"content_hash": "00722ef84e4f69ff6d43a0f91b9c6aad",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 170,
"avg_line_length": 46.27272727272727,
"alnum_prop": 0.6497333707549817,
"repo_name": "marcanpilami/MAGE",
"id": "1c0394d29006db583a53d9ad3daedaad09de053d",
"size": "3580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ref/views/duplicate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16033"
},
{
"name": "Dockerfile",
"bytes": "1730"
},
{
"name": "HTML",
"bytes": "88971"
},
{
"name": "JavaScript",
"bytes": "6024"
},
{
"name": "Python",
"bytes": "401724"
},
{
"name": "Shell",
"bytes": "20159"
}
],
"symlink_target": ""
} |
'''Generate simConst.py from simConst.h distributed from CoppeliaSim
$ ./generate_simConst.py > ../pyrep/backend/simConst.py
'''
import contextlib
import importlib
import os
import os.path as osp
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
try:
import CppHeaderParser
except ImportError:
print('Please run following:\n\tpip install CppHeaderParser',
file=sys.stderr)
sys.exit(1)
def get_coppeliasim_root():
    """Return the CoppeliaSim installation root from the environment.

    Raises RuntimeError when the COPPELIASIM_ROOT variable is unset.
    """
    root = os.environ.get('COPPELIASIM_ROOT')
    if root is None:
        raise RuntimeError('Please set env COPPELIASIM_ROOT')
    return root
def import_as(filename, module):
    """Load the Python file *filename* as a module named *module* and return it."""
    # 'import importlib' at module scope does not guarantee that the 'util'
    # submodule is loaded; import it explicitly so attribute access below
    # cannot fail with AttributeError.
    import importlib.util
    spec = importlib.util.spec_from_file_location(module, filename)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod
def generate_simConst_py():
    """Parse simConst.h, probe every constant's value with a tiny C++
    program compiled by g++, and return the values as Python source text.
    """
    header_file = osp.join(
        get_coppeliasim_root(), 'programming/include/simConst.h'
    )
    # CppHeaderParser prints noise to stdout; divert it to stderr so the
    # generated code (this script's stdout) stays clean.
    with contextlib.redirect_stdout(sys.stderr):
        header = CppHeaderParser.CppHeader(header_file)
    # The header is expected to contain only defines and enums.
    assert header.classes == {}
    assert header.functions == []
    assert header.global_enums == {}

    names = []
    for define in header.defines:
        # Strip a trailing /* ... */ comment, if any.
        m = re.match('^(.*)/\*.*\*/', define)
        if m:
            define = m.groups()[0]
        splits = define.split()
        if len(splits) == 2:  # only simple "NAME VALUE" defines
            name, _ = splits
            if name in ['SIM_PROGRAM_VERSION']:
                continue  # not a numeric constant
            names.append(name)
    for enum in header.enums:
        assert enum['namespace'] == ''
        for value in enum['values']:
            names.append(value['name'])
        # Empty entry -> blank separator line in the probe program's output.
        names.append('')

    # Build a C++ program that prints "NAME = value" for every constant.
    out_dir = tempfile.mkdtemp()
    cpp_file = osp.join(out_dir, 'generate_simConst_py.cpp')
    with open(cpp_file, 'w') as f:
        f.write('#include <iostream>\n')
        f.write('#include <simConst.h>\n')
        f.write('int main() {\n')
        for name in names:
            if name:
                f.write(
                    f'\tstd::cout << "{name} = " << {name} << std::endl;\n'
                )
            else:
                f.write('\tstd::cout << std::endl;\n')
        f.write('}\n')

    # Compile and run the probe; its stdout is already valid Python.
    out_file = osp.join(out_dir, 'generate_simConst_py')
    cmd = f'g++ {cpp_file} -o {out_file} -I{osp.dirname(header_file)}'
    subprocess.check_call(shlex.split(cmd))
    cmd = osp.join(out_dir, 'generate_simConst_py')
    code = subprocess.check_output(cmd).strip().decode()
    shutil.rmtree(out_dir)
    return code
def merge_simConst_py(code):
    """Merge constants from the official simConst.py bindings into *code*.

    Names present only in the official file are appended to the generated
    code; names present in both are value-compared and a warning goes to
    stderr on mismatch.  Returns the merged Python source text.
    """
    official_file = osp.join(
        get_coppeliasim_root(),
        'programming/remoteApiBindings/python/python/simConst.py')
    official = import_as(official_file, 'official')

    # Import the freshly generated code so its values can be compared.
    out_dir = tempfile.mkdtemp()
    generated_file = osp.join(out_dir, 'simConst_generated.py')
    with open(generated_file, 'w') as f:
        f.write(code)
    generated = import_as(generated_file, 'generated')
    # Fix: the original leaked this temporary directory; the module is
    # fully loaded at this point so the files are no longer needed.
    shutil.rmtree(out_dir)

    name_and_value = []
    for name in dir(official):
        if re.match('__.*__', name):
            continue  # skip dunders
        if name in dir(generated):
            value_official = getattr(official, name)
            value_generated = getattr(generated, name)
            if value_official != value_generated:
                print(f"WARNING: The values of var '{name}' is not equal: "
                      f'official={value_official}, '
                      f'generated={value_generated}', file=sys.stderr)
        else:
            # Official-only constant: carry it over verbatim.
            name_and_value.append((name, getattr(official, name)))

    code = ('# This file is automatically generated by '
            f'{osp.basename(__file__)} from simConst.h\n\n' + code)
    code += ('\n\n# Followings are copied from official simConst.py '
             '(so possibly deprecated in C++ API)')
    for name, value in name_and_value:
        code += f'\n{name} = {value}'
    code += '\n'
    return code
def main():
    """Generate simConst.py content and write it to stdout."""
    print(merge_simConst_py(generate_simConst_py()), end='')
if __name__ == '__main__':
main()
| {
"content_hash": "420d3a27cf6f484a154f6bdd78e61176",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 75,
"avg_line_length": 28.20138888888889,
"alnum_prop": 0.5875400147746861,
"repo_name": "stepjam/PyRep",
"id": "b90fa7e04f9e51f5b0d2c8da8b7a4e60347bd2d9",
"size": "4083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/generate_simConst.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "56307"
},
{
"name": "Lua",
"bytes": "16854"
},
{
"name": "Python",
"bytes": "428818"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.utils import six
from djblets.webapi.errors import INVALID_FORM_DATA, PERMISSION_DENIED
from reviewboard.attachments.models import (FileAttachment,
FileAttachmentHistory)
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (file_attachment_item_mimetype,
file_attachment_list_mimetype)
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.urls import (get_file_attachment_item_url,
get_file_attachment_list_url)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(ReviewRequestChildListMixin, BaseWebAPITestCase):
    """Testing the FileAttachmentResource list APIs.

    The BasicTestsMetaclass generates the standard CRUD tests from the
    setup_basic_* / check_*_result hooks defined below.
    """

    fixtures = ['test_users']
    basic_get_fixtures = ['test_scmtools']
    sample_api_url = 'review-requests/<id>/file-attachments/'
    resource = resources.file_attachment

    def setup_review_request_child_test(self, review_request):
        # URL/mimetype pair consumed by the shared child-list mixin tests.
        return (get_file_attachment_list_url(review_request),
                file_attachment_list_mimetype)

    def compare_item(self, item_rsp, attachment):
        # Verify the serialized payload matches the model instance.
        self.assertEqual(item_rsp['id'], attachment.pk)
        self.assertEqual(item_rsp['filename'], attachment.filename)
        self.assertEqual(item_rsp['revision'], attachment.attachment_revision)

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        review_request = self.create_review_request(
            create_repository=True,
            with_local_site=with_local_site,
            submitter=user)

        if populate_items:
            # This is the file attachment that should be returned.
            items = [
                self.create_file_attachment(review_request,
                                            orig_filename='logo1.png'),
            ]

            # This attachment shouldn't be shown in the results. It represents
            # a file to be shown in the diff viewer.
            self.create_file_attachment(review_request,
                                        orig_filename='logo2.png',
                                        repo_path='/logo.png',
                                        repo_revision='123',
                                        repository=review_request.repository)

            # This attachment shouldn't be shown either, for the same
            # reasons.
            diffset = self.create_diffset(review_request)
            filediff = self.create_filediff(diffset,
                                            source_file='/logo3.png',
                                            dest_file='/logo3.png',
                                            source_revision='123',
                                            dest_detail='124')
            self.create_file_attachment(review_request,
                                        orig_filename='logo3.png',
                                        added_in_filediff=filediff)
        else:
            items = []

        return (get_file_attachment_list_url(review_request, local_site_name),
                file_attachment_list_mimetype,
                items)

    #
    # HTTP POST tests
    #

    def setup_basic_post_test(self, user, with_local_site, local_site_name,
                              post_valid_data):
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)

        return (get_file_attachment_list_url(review_request, local_site_name),
                file_attachment_item_mimetype,
                {'path': open(self.get_sample_image_filename(), 'rb')},
                [review_request])

    def check_post_result(self, user, rsp, review_request):
        # A POST must put the new attachment on a draft, not directly on the
        # published review request.
        draft = review_request.get_draft()
        self.assertIsNotNone(draft)

        self.assertIn('file_attachment', rsp)
        item_rsp = rsp['file_attachment']

        attachment = FileAttachment.objects.get(pk=item_rsp['id'])
        self.assertIn(attachment, draft.file_attachments.all())
        self.assertNotIn(attachment, review_request.file_attachments.all())
        self.compare_item(item_rsp, attachment)

    def test_post_not_owner(self):
        """Testing the POST review-requests/<id>/file-attachments/ API
        without owner
        """
        review_request = self.create_review_request()
        self.assertNotEqual(review_request.submitter, self.user)

        with open(self.get_sample_image_filename(), 'rb') as f:
            self.assertTrue(f)
            rsp = self.api_post(
                get_file_attachment_list_url(review_request),
                {
                    'caption': 'logo',
                    'path': f,
                },
                expected_status=403)

        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)

    def test_post_with_attachment_history_id(self):
        """Testing the POST review-requests/<id>/file-attachments/ API with a
        file attachment history
        """
        review_request = self.create_review_request(
            submitter=self.user, publish=True, target_people=[self.user])
        history = FileAttachmentHistory.objects.create(display_position=0)
        review_request.file_attachment_histories.add(history)

        self.assertEqual(history.latest_revision, 0)

        with open(self.get_sample_image_filename(), 'rb') as f:
            self.assertTrue(f)
            rsp = self.api_post(
                get_file_attachment_list_url(review_request),
                {
                    'path': f,
                    'attachment_history': history.pk,
                },
                expected_mimetype=file_attachment_item_mimetype)
            self.assertEqual(rsp['stat'], 'ok')
            self.assertEqual(rsp['file_attachment']['attachment_history_id'],
                             history.pk)

            # Posting against the history bumps its revision counter.
            history = FileAttachmentHistory.objects.get(pk=history.pk)
            self.assertEqual(history.latest_revision, 1)

            review_request.get_draft().publish()

            # Add a second revision
            f.seek(0)
            rsp = self.api_post(
                get_file_attachment_list_url(review_request),
                {
                    'path': f,
                    'attachment_history': history.pk,
                },
                expected_mimetype=file_attachment_item_mimetype)
            self.assertEqual(rsp['stat'], 'ok')
            self.assertEqual(rsp['file_attachment']['attachment_history_id'],
                             history.pk)

            history = FileAttachmentHistory.objects.get(pk=history.pk)
            self.assertEqual(history.latest_revision, 2)

    def test_post_with_attachment_history_id_wrong_review_request(self):
        """Testing the POST review-requests/<id>/file-attachments/ API with a
        file attachment history belonging to a different review request
        """
        review_request_1 = self.create_review_request(submitter=self.user,
                                                      publish=True)
        history = FileAttachmentHistory.objects.create(display_position=0)
        review_request_1.file_attachment_histories.add(history)

        review_request_2 = self.create_review_request(submitter=self.user,
                                                      publish=True)

        self.assertEqual(history.latest_revision, 0)

        with open(self.get_sample_image_filename(), 'rb') as f:
            self.assertTrue(f)
            # Using review_request_1's history on review_request_2 must fail.
            rsp = self.api_post(
                get_file_attachment_list_url(review_request_2),
                {
                    'path': f,
                    'attachment_history': history.pk,
                },
                expected_status=400)

        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)

        # The rejected POST must not have bumped the revision counter.
        history = FileAttachmentHistory.objects.get(pk=history.pk)
        self.assertEqual(history.latest_revision, 0)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(ReviewRequestChildItemMixin, BaseWebAPITestCase):
    """Testing the FileAttachmentResource item APIs.

    The BasicTestsMetaclass generates the standard GET/PUT/DELETE tests from
    the setup_basic_* / check_*_result hooks defined below.
    """

    fixtures = ['test_users']
    sample_api_url = 'review-requests/<id>/file-attachments/<id>/'
    resource = resources.file_attachment

    def setup_review_request_child_test(self, review_request):
        file_attachment = self.create_file_attachment(review_request)

        return (get_file_attachment_item_url(file_attachment),
                file_attachment_item_mimetype)

    def compare_item(self, item_rsp, attachment):
        # Verify the serialized payload matches the model instance.
        self.assertEqual(item_rsp['id'], attachment.pk)
        self.assertEqual(item_rsp['filename'], attachment.filename)
        self.assertEqual(item_rsp['revision'], attachment.attachment_revision)
        self.assertEqual(item_rsp['absolute_url'],
                         attachment.get_absolute_url())

    #
    # HTTP DELETE tests
    #

    def setup_basic_delete_test(self, user, with_local_site, local_site_name):
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user)
        file_attachment = self.create_file_attachment(review_request)

        return (get_file_attachment_item_url(file_attachment, local_site_name),
                [review_request, file_attachment])

    def check_delete_result(self, user, review_request, file_attachment):
        # DELETE only deactivates the attachment on a draft; it stays on the
        # published review request until the draft is published.
        draft = review_request.get_draft()
        self.assertIsNotNone(draft)
        self.assertIn(file_attachment, draft.inactive_file_attachments.all())
        self.assertNotIn(file_attachment, draft.file_attachments.all())
        self.assertIn(file_attachment, review_request.file_attachments.all())

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name):
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user)
        file_attachment = self.create_file_attachment(review_request)

        return (get_file_attachment_item_url(file_attachment, local_site_name),
                file_attachment_item_mimetype,
                file_attachment)

    def test_get_not_modified(self):
        """Testing the GET review-requests/<id>/file-attachments/<id>/ API
        with Not Modified response
        """
        review_request = self.create_review_request(publish=True)
        file_attachment = self.create_file_attachment(review_request)

        self._testHttpCaching(get_file_attachment_item_url(file_attachment),
                              check_etags=True)

    #
    # HTTP PUT tests
    #

    def setup_basic_put_test(self, user, with_local_site, local_site_name,
                             put_valid_data):
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user)
        file_attachment = self.create_file_attachment(review_request)

        return (get_file_attachment_item_url(file_attachment, local_site_name),
                file_attachment_item_mimetype,
                {'caption': 'My new caption'},
                file_attachment,
                [review_request])

    def check_put_result(self, user, item_rsp, file_attachment,
                         review_request):
        # A caption PUT lands on the draft (draft_caption), leaving the
        # published attachment in place.
        file_attachment = FileAttachment.objects.get(pk=file_attachment.pk)
        self.assertEqual(item_rsp['id'], file_attachment.pk)
        self.assertEqual(file_attachment.draft_caption, 'My new caption')

        draft = review_request.get_draft()
        self.assertIsNotNone(draft)
        self.assertIn(file_attachment, draft.file_attachments.all())
        self.assertIn(file_attachment, review_request.file_attachments.all())

        self.compare_item(item_rsp, file_attachment)
| {
"content_hash": "7ee25e12e34a498a4f330fb9c50a3c17",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 79,
"avg_line_length": 41.29865771812081,
"alnum_prop": 0.5877955635004469,
"repo_name": "chipx86/reviewboard",
"id": "0d946cc9f56a9ed677f0460f4e65a1820921fede",
"size": "12307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/webapi/tests/test_file_attachment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "434719"
},
{
"name": "HTML",
"bytes": "224310"
},
{
"name": "JavaScript",
"bytes": "3830753"
},
{
"name": "Python",
"bytes": "7333453"
},
{
"name": "Shell",
"bytes": "777"
}
],
"symlink_target": ""
} |
import confluent.config.attributes as attrscheme
import confluent.interface.console as console
import confluent.exceptions as exc
import confluent.messages as msg
import confluent.noderange as noderange
import confluent.shellmodule as shellmodule
import itertools
import os
import sys
pluginmap = {}
def seek_element(currplace, currkey):
    """Take one step of a nested resource lookup.

    Returns ``currplace[currkey]``.  If *currplace* is not subscriptable but
    is a PluginCollection, the collection itself is returned, since all of
    its children are managed by the plugin.  Other TypeErrors propagate.
    """
    try:
        return currplace[currkey]
    except TypeError:
        if not isinstance(currplace, PluginCollection):
            raise
        # Plugin curated collection: descendants are up to the plugin.
        return currplace
def nested_lookup(nestdict, key):
    """Walk *key* (a sequence of path components) down the nested resource
    dictionary *nestdict* one component at a time.

    Raises exc.NotFoundException when the path indexes into something that
    is not a container (and not a plugin-curated collection).
    """
    # 'reduce' is a builtin on Python 2 only; import it explicitly so this
    # also works under Python 3 (functools.reduce exists on both).
    from functools import reduce
    try:
        return reduce(seek_element, key, nestdict)
    except TypeError:
        raise exc.NotFoundException("Invalid element requested")
def load_plugins():
    """Discover plugins under the sibling 'plugins' directory and populate
    the module-level pluginmap.

    Shell plugins (*.sh) are wrapped in shellmodule.Plugin; Python plugins
    are imported, and may advertise multiple names via 'plugin_names'.
    """
    # To know our plugins directory, we get the parent path of 'bin'
    path = os.path.dirname(os.path.realpath(__file__))
    plugintop = os.path.realpath(os.path.join(path, 'plugins'))
    plugins = set()
    for plugindir in os.listdir(plugintop):
        plugindir = os.path.join(plugintop, plugindir)
        if not os.path.isdir(plugindir):
            continue
        sys.path.append(plugindir)
        # two passes, to avoid adding both py and pyc files
        for plugin in os.listdir(plugindir):
            if plugin.startswith('.'):
                continue  # skip hidden files
            (plugin, plugtype) = os.path.splitext(plugin)
            if plugtype == '.sh':
                pluginmap[plugin] = shellmodule.Plugin(
                    os.path.join(plugindir, plugin + '.sh'))
            else:
                plugins.add(plugin)
        for plugin in plugins:
            tmpmod = __import__(plugin)
            if 'plugin_names' in tmpmod.__dict__:
                # One module may serve several plugin names.
                for name in tmpmod.plugin_names:
                    pluginmap[name] = tmpmod
            else:
                pluginmap[plugin] = tmpmod
# Top-level collections exposed at the API root.
rootcollections = ['noderange/', 'nodes/', 'nodegroups/', 'users/']
class PluginRoute(object):
    """Leaf resource routed to a plugin.

    routeinfo carries either a fixed 'handler' name or a 'pluginattrs'
    list (with optional 'default') used to pick the plugin per node.
    """
    def __init__(self, routedict):
        self.routeinfo = routedict
class PluginCollection(object):
    """Collection whose children are enumerated by the plugin itself
    rather than by the static resource maps below.
    """
    def __init__(self, routedict):
        self.routeinfo = routedict
# _ prefix indicates internal use (e.g. special console scheme) and should not
# be enumerated in any collection
# Static map of per-node resources to their plugin routing information.
noderesources = {
    '_console': {
        'session': PluginRoute({
            'pluginattrs': ['console.method'],
        }),
    },
    'console': {
        # this is a dummy value, http or socket must handle special
        'session': PluginRoute({}),
    },
    'power': {
        'state': PluginRoute({
            'pluginattrs': ['hardwaremanagement.method'],
            'default': 'ipmi',
        }),
    },
    'health': {
        'hardware': PluginRoute({
            'pluginattrs': ['hardwaremanagement.method'],
            'default': 'ipmi',
        }),
    },
    'identify': PluginRoute({
        'pluginattrs': ['hardwaremanagement.method'],
        'default': 'ipmi',
    }),
    'boot': {
        'nextdevice': PluginRoute({
            'pluginattrs': ['hardwaremanagement.method'],
            'default': 'ipmi',
        }),
    },
    'attributes': {
        'all': PluginRoute({'handler': 'attributes'}),
        'current': PluginRoute({'handler': 'attributes'}),
    },
    'sensors': {
        'hardware': {
            'all': PluginCollection({
                'pluginattrs': ['hardwaremanagement.method'],
                'default': 'ipmi',
            }),
            'temperature': PluginCollection({
                'pluginattrs': ['hardwaremanagement.method'],
                'default': 'ipmi',
            }),
            'power': PluginCollection({
                'pluginattrs': ['hardwaremanagement.method'],
                'default': 'ipmi',
            }),
            'fans': PluginCollection({
                'pluginattrs': ['hardwaremanagement.method'],
                'default': 'ipmi',
            }),
        },
    },
}
# Static map of per-nodegroup resources to their plugin routing information.
nodegroupresources = {
    'attributes': {
        'all': PluginRoute({'handler': 'attributes'}),
        'current': PluginRoute({'handler': 'attributes'}),
    },
}
def create_user(inputdata, configmanager):
    """Create a user account from a request payload.

    The payload must contain a 'name' key; everything else is passed
    through as the account's attribute map.
    """
    try:
        username = inputdata.pop('name')
    except (KeyError, ValueError):
        raise exc.InvalidArgumentException()
    configmanager.create_user(username, attributemap=inputdata)
def update_user(name, attribmap, configmanager):
    """Apply attribute updates to an existing user, translating a bad
    attribute map into an API-level InvalidArgumentException.
    """
    try:
        configmanager.set_user(name, attribmap)
    except ValueError:
        raise exc.InvalidArgumentException()
def show_user(name, configmanager):
    """Yield one message per schema attribute for the given user.

    Passwords are never revealed; only whether a crypted value exists.
    """
    userobj = configmanager.get_user(name)
    rv = {}
    # NOTE: iterkeys is Python-2-only; this module predates py3 support.
    for attr in attrscheme.user.iterkeys():
        rv[attr] = None
        if attr == 'password':
            if 'cryptpass' in userobj:
                # Indicate a password is set without exposing it.
                rv['password'] = {'cryptvalue': True}
            yield msg.CryptedAttributes(kv={'password': rv['password']},
                                        desc=attrscheme.user[attr][
                                            'description'])
        else:
            if attr in userobj:
                rv[attr] = userobj[attr]
            yield msg.Attributes(kv={attr: rv[attr]},
                                 desc=attrscheme.user[attr]['description'])
def stripnode(iterablersp, node):
    """Yield each response message with the single-node context stripped.

    A None element means the operation was unhandled for this node.
    """
    for rsp in iterablersp:
        if rsp is None:
            raise exc.NotImplementedException("Not Implemented")
        rsp.strip_node(node)
        yield rsp
def iterate_collections(iterable, forcecollection=True):
    """Yield a deletable ChildCollection message per name in *iterable*.

    When forcecollection is set, a trailing '/' is appended to names that
    lack one.
    """
    for name in iterable:
        if forcecollection and name[-1] != '/':
            name = name + '/'
        yield msg.ChildCollection(name, candelete=True)
def iterate_resources(fancydict):
    """Yield a ChildCollection message per public child of a resource map.

    Names starting with '_' are internal and skipped.  Children that are not
    PluginRoute leaves are themselves collections, so '/' is appended.
    """
    # Iterating a dict yields its keys on both Python 2 and 3; the original
    # used the py2-only iterkeys().
    for resource in fancydict:
        if resource.startswith("_"):
            continue
        if not isinstance(fancydict[resource], PluginRoute):  # a resource
            resource += '/'
        yield msg.ChildCollection(resource)
def delete_user(user, configmanager):
    # Remove the account, then confirm the deletion to the API client.
    configmanager.del_user(user)
    yield msg.DeletedResource(user)
def delete_nodegroup_collection(collectionpath, configmanager):
    """Delete the nodegroup named by ['nodegroups', <group>].

    Deleting anything deeper than the group itself is not supported.
    """
    if len(collectionpath) == 2:  # just the nodegroup
        group = collectionpath[-1]
        configmanager.del_groups([group])
        yield msg.DeletedResource(group)
    else:
        raise Exception("Not implemented")
def delete_node_collection(collectionpath, configmanager):
    """Delete the node named by ['nodes', <node>].

    Deleting anything deeper than the node itself is not supported.
    """
    if len(collectionpath) == 2:  # just node
        node = collectionpath[-1]
        configmanager.del_nodes([node])
        yield msg.DeletedResource(node)
    else:
        raise Exception("Not implemented")
def enumerate_nodegroup_collection(collectionpath, configmanager):
    """List the child resources under a nodegroup path."""
    nodegroup = collectionpath[1]
    if not configmanager.is_nodegroup(nodegroup):
        raise exc.NotFoundException("Invalid element requested")
    # NOTE: mutates the caller's list in place, dropping 'nodegroups/<name>'.
    del collectionpath[0:2]
    collection = nested_lookup(nodegroupresources, collectionpath)
    return iterate_resources(collection)
def enumerate_node_collection(collectionpath, configmanager):
    """List nodes (for '/nodes/') or the child resources under a node or
    noderange path.
    """
    if collectionpath == ['nodes']:  # it is just '/node/', need to list nodes
        allnodes = list(configmanager.list_nodes())
        try:
            # Prefer human-friendly numeric-aware ordering.
            allnodes.sort(key=noderange.humanify_nodename)
        except TypeError:
            allnodes.sort()
        return iterate_collections(allnodes)
    nodeorrange = collectionpath[1]
    if collectionpath[0] == 'nodes' and not configmanager.is_node(nodeorrange):
        raise exc.NotFoundException("Invalid element requested")
    collection = nested_lookup(noderesources, collectionpath[2:])
    if len(collectionpath) == 2 and collectionpath[0] == 'noderange':
        # NOTE(review): this injects a synthetic 'nodes' child into the
        # shared noderesources map (idempotent, but a module-level mutation).
        collection['nodes'] = {}
    if not isinstance(collection, dict):
        raise exc.NotFoundException("Invalid element requested")
    return iterate_resources(collection)
def create_group(inputdata, configmanager):
    """Create a node group from a request payload.

    The payload must contain a 'name' key; remaining keys become the
    group's attributes.  Raises exc.InvalidArgumentException on a missing
    name or on attribute validation failure.
    """
    try:
        groupname = inputdata['name']
        del inputdata['name']
        attribmap = {groupname: inputdata}
    except KeyError:
        # Consistency fix: give the same diagnostic as create_node().
        raise exc.InvalidArgumentException('name not specified')
    try:
        configmanager.add_group_attributes(attribmap)
    except ValueError as e:
        raise exc.InvalidArgumentException(str(e))
def create_node(inputdata, configmanager):
    """Create a node from a request payload.

    The payload must contain a 'name' key; remaining keys become the
    node's attributes.  Raises exc.InvalidArgumentException on a missing
    name or on attribute validation failure.
    """
    try:
        nodename = inputdata['name']
        del inputdata['name']
    except KeyError:
        raise exc.InvalidArgumentException('name not specified')
    attribmap = {nodename: inputdata}
    try:
        configmanager.add_node_attributes(attribmap)
    except ValueError as e:
        raise exc.InvalidArgumentException(str(e))
def enumerate_collections(collections):
    # Wrap each top-level collection name in a ChildCollection message.
    for collection in collections:
        yield msg.ChildCollection(collection)
def handle_nodegroup_request(configmanager, inputdata,
                             pathcomponents, operation):
    """Dispatch an API request under /nodegroups/.

    Handles group creation/enumeration, collection listing/deletion, and
    routing of leaf resources to the configured plugin handler.
    """
    iscollection = False
    if len(pathcomponents) < 2:
        # '/nodegroups/' itself: optionally create, then list all groups.
        if operation == "create":
            inputdata = msg.InputAttributes(pathcomponents, inputdata)
            create_group(inputdata.attribs, configmanager)
        allgroups = list(configmanager.get_groups())
        try:
            allgroups.sort(key=noderange.humanify_nodename)
        except TypeError:
            allgroups.sort()
        return iterate_collections(allgroups)
    elif len(pathcomponents) == 2:
        iscollection = True
    else:
        try:
            routespec = nested_lookup(nodegroupresources, pathcomponents[2:])
            if isinstance(routespec, dict):
                iscollection = True
            elif isinstance(routespec, PluginCollection):
                iscollection = False  # it is a collection, but plugin defined
        except KeyError:
            raise exc.NotFoundException("Invalid element requested")
    if iscollection:
        if operation == "delete":
            return delete_nodegroup_collection(pathcomponents,
                                               configmanager)
        elif operation == "retrieve":
            return enumerate_nodegroup_collection(pathcomponents,
                                                  configmanager)
        else:
            raise Exception("TODO")
    plugroute = routespec.routeinfo
    inputdata = msg.get_input_message(
        pathcomponents[2:], operation, inputdata)
    if 'handler' in plugroute:  # fixed handler definition
        hfunc = getattr(pluginmap[plugroute['handler']], operation)
        return hfunc(
            nodes=None, element=pathcomponents,
            configmanager=configmanager,
            inputdata=inputdata)
    raise Exception("unknown case encountered")
def handle_node_request(configmanager, inputdata, operation,
                        pathcomponents):
    """Dispatch an API request under /nodes/ or /noderange/.

    Resolves the target node(s), then either enumerates/deletes a
    collection or routes a leaf resource to the appropriate plugin(s),
    fanning out per node when the plugin is attribute-selected.
    """
    iscollection = False
    routespec = None
    if pathcomponents[0] == 'noderange':
        if len(pathcomponents) > 3 and pathcomponents[2] == 'nodes':
            # transform into a normal looking node request
            # this does mean we don't see if it is a valid
            # child, but that's not a goal for the noderange
            # facility anyway
            isnoderange = False
            pathcomponents = pathcomponents[2:]
        else:
            isnoderange = True
    else:
        isnoderange = False
    try:
        nodeorrange = pathcomponents[1]
        if not isnoderange and not configmanager.is_node(nodeorrange):
            raise exc.NotFoundException("Invalid Node")
        if isnoderange:
            try:
                nodes = noderange.NodeRange(nodeorrange, configmanager).nodes
            except Exception as e:
                raise exc.NotFoundException("Invalid Noderange: " + str(e))
        else:
            nodes = (nodeorrange,)
    except IndexError:  # doesn't actually have a long enough path
        # this is enumerating a list of nodes or just empty noderange
        if isnoderange and operation == "retrieve":
            return iterate_collections([])
        elif isnoderange or operation == "delete":
            raise exc.InvalidArgumentException()
        if operation == "create":
            inputdata = msg.InputAttributes(pathcomponents, inputdata)
            create_node(inputdata.attribs, configmanager)
        # List all nodes (after a possible create).
        allnodes = list(configmanager.list_nodes())
        try:
            allnodes.sort(key=noderange.humanify_nodename)
        except TypeError:
            allnodes.sort()
        return iterate_collections(allnodes)
    if isnoderange and len(pathcomponents) == 3 and pathcomponents[2] == 'nodes':
        # this means that it's a list of relevant nodes
        nodes = list(nodes)
        try:
            nodes.sort(key=noderange.humanify_nodename)
        except TypeError:
            nodes.sort()
        return iterate_collections(nodes)
    if len(pathcomponents) == 2:
        iscollection = True
    else:
        try:
            routespec = nested_lookup(noderesources, pathcomponents[2:])
        except KeyError:
            raise exc.NotFoundException("Invalid element requested")
        if isinstance(routespec, dict):
            iscollection = True
        elif isinstance(routespec, PluginCollection):
            iscollection = False  # it is a collection, but plugin defined
    if iscollection:
        if operation == "delete":
            return delete_node_collection(pathcomponents, configmanager)
        elif operation == "retrieve":
            return enumerate_node_collection(pathcomponents, configmanager)
        else:
            raise Exception("TODO here")
    # NOTE: mutates the caller's list, leaving only the resource sub-path.
    del pathcomponents[0:2]
    passvalues = []
    plugroute = routespec.routeinfo
    inputdata = msg.get_input_message(
        pathcomponents, operation, inputdata, nodes)
    if 'handler' in plugroute:  # fixed handler definition, easy enough
        hfunc = getattr(pluginmap[plugroute['handler']], operation)
        passvalue = hfunc(
            nodes=nodes, element=pathcomponents,
            configmanager=configmanager,
            inputdata=inputdata)
        if isnoderange:
            return passvalue
        else:
            return stripnode(passvalue, nodes[0])
    elif 'pluginattrs' in plugroute:
        # Select the plugin per node from the node's attributes, falling
        # back to the route's default.
        nodeattr = configmanager.get_node_attributes(
            nodes, plugroute['pluginattrs'])
        plugpath = None
        if 'default' in plugroute:
            plugpath = plugroute['default']
        nodesbyhandler = {}
        for node in nodes:
            for attrname in plugroute['pluginattrs']:
                if attrname in nodeattr[node]:
                    plugpath = nodeattr[node][attrname]['value']
            # NOTE(review): plugpath is not reset per node, so a node
            # without the attribute inherits the previous node's plugin
            # when no 'default' is configured — confirm intended.
            if plugpath is not None:
                hfunc = getattr(pluginmap[plugpath], operation)
                if hfunc in nodesbyhandler:
                    nodesbyhandler[hfunc].append(node)
                else:
                    nodesbyhandler[hfunc] = [node]
        for hfunc in nodesbyhandler:
            passvalues.append(hfunc(
                nodes=nodesbyhandler[hfunc], element=pathcomponents,
                configmanager=configmanager,
                inputdata=inputdata))
        if isnoderange:
            return itertools.chain(*passvalues)
        elif isinstance(passvalues[0], console.Console):
            # Console sessions are returned as-is, not as message streams.
            return passvalues[0]
        else:
            return stripnode(passvalues[0], nodes[0])
def handle_path(path, operation, configmanager, inputdata=None):
    """Given a full path request, return an object.

    The plugins should generally return some sort of iterator.
    An exception is made for console/session, which should return
    a class with connect(), read(), write(bytes), and close()
    """
    pathcomponents = path.split('/')
    del pathcomponents[0]  # discard the value from leading /
    if pathcomponents[-1] == '':
        del pathcomponents[-1]  # drop trailing '/' artifact
    if not pathcomponents:  # root collection list
        return enumerate_collections(rootcollections)
    elif pathcomponents[0] == 'noderange':
        return handle_node_request(configmanager, inputdata, operation,
                                   pathcomponents)
    elif pathcomponents[0] == 'nodegroups':
        return handle_nodegroup_request(configmanager, inputdata,
                                        pathcomponents,
                                        operation)
    elif pathcomponents[0] == 'nodes':
        # single node request of some sort
        return handle_node_request(configmanager, inputdata,
                                   operation, pathcomponents)
    elif pathcomponents[0] == 'users':
        # TODO: when non-administrator accounts exist,
        # they must only be allowed to see their own user
        try:
            user = pathcomponents[1]
        except IndexError:  # it's just users/
            if operation == 'create':
                inputdata = msg.get_input_message(
                    pathcomponents, operation, inputdata)
                create_user(inputdata.attribs, configmanager)
            return iterate_collections(configmanager.list_users(),
                                       forcecollection=False)
        if user not in configmanager.list_users():
            raise exc.NotFoundException("Invalid user %s" % user)
        if operation == 'retrieve':
            return show_user(user, configmanager)
        elif operation == 'delete':
            return delete_user(user, configmanager)
        elif operation == 'update':
            inputdata = msg.get_input_message(
                pathcomponents, operation, inputdata)
            update_user(user, inputdata.attribs, configmanager)
            return show_user(user, configmanager)
    else:
        raise exc.NotFoundException()
| {
"content_hash": "e3d0c1bc3aca79d23cd7a7e49d61352d",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 81,
"avg_line_length": 36.56521739130435,
"alnum_prop": 0.6043825377951418,
"repo_name": "michaelfardu/thinkconfluent",
"id": "b97ac7a4979e5ad1b4e5c13f892c6a3f1e2475bc",
"size": "19135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "confluent_server/confluent/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2464"
},
{
"name": "HTML",
"bytes": "352"
},
{
"name": "JavaScript",
"bytes": "167292"
},
{
"name": "Perl",
"bytes": "8853"
},
{
"name": "Python",
"bytes": "282541"
},
{
"name": "Shell",
"bytes": "1591"
}
],
"symlink_target": ""
} |
import board as b
import list as l
import card as c
class Project(object):
    """A named project owning a list of boards plus descriptive metadata.

    The three-letter uppercase ``prefix`` derived from the name is used as a
    short identifier (e.g. for card numbering).
    """

    def __init__(self, name):
        self.name = name
        self.boards = []        # list of b.Board instances
        self.description = ""
        self.start_date = ""
        self.end_date = ""
        # Up-to-three-letter uppercase prefix derived from the name.
        self.prefix = self.name[0:3].upper()

    def addBoard(self, board_name):
        """Create a Board with the given name and attach it to the project."""
        self.boards.append(b.Board(board_name))
        return None

    def getListOfBoardNames(self):
        """Return the names of all attached boards."""
        # Idiomatic comprehension instead of the manual accumulate loop.
        return [board.name for board in self.boards]

    def getDocument(self):
        """Return a dict representation of the project for persistence."""
        return {
            "name": self.name,
            "boards": self.boards,
            "description": self.description,
            "prefix": self.prefix,
        }
| {
"content_hash": "3e7341b0890b97170a2ed3990fc138dc",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 41,
"avg_line_length": 17.324324324324323,
"alnum_prop": 0.6536661466458659,
"repo_name": "matthew-nolan/cardception",
"id": "6aa3054c3a7ff212f42bd89e4c1c16191376d601",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25334"
}
],
"symlink_target": ""
} |
from google.cloud import networkconnectivity_v1
async def sample_delete_spoke():
    """Delete a Network Connectivity spoke and wait for the LRO to finish."""
    # Create a client
    client = networkconnectivity_v1.HubServiceAsyncClient()

    # Initialize request argument(s)
    request = networkconnectivity_v1.DeleteSpokeRequest(
        name="name_value",
    )

    # Make the request.  Fix: the async client method must be awaited to
    # obtain the AsyncOperation handle; the original accessed .result on a
    # bare coroutine, which has no such attribute.
    operation = await client.delete_spoke(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is itself a coroutine and must be awaited.
    response = await operation.result()

    # Handle the response
    print(response)
# [END networkconnectivity_v1_generated_HubService_DeleteSpoke_async]
| {
"content_hash": "8ed728b18220d7d29dccf7ff11bea9e3",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 69,
"avg_line_length": 25.217391304347824,
"alnum_prop": 0.7206896551724138,
"repo_name": "googleapis/python-network-connectivity",
"id": "a964fe348735522c83f5d51ccace3f7a8d279f45",
"size": "1983",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/networkconnectivity_v1_generated_hub_service_delete_spoke_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "795166"
},
{
"name": "Shell",
"bytes": "30702"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import json
import re
import unittest
from unittest import mock
from unittest.mock import patch
import pytest
from parameterized import parameterized
from trino.transaction import IsolationLevel
from airflow import AirflowException
from airflow.models import Connection
from airflow.providers.trino.hooks.trino import TrinoHook
# Dotted patch targets used by the mock.patch decorators in the tests below.
HOOK_GET_CONNECTION = "airflow.providers.trino.hooks.trino.TrinoHook.get_connection"
BASIC_AUTHENTICATION = "airflow.providers.trino.hooks.trino.trino.auth.BasicAuthentication"
KERBEROS_AUTHENTICATION = "airflow.providers.trino.hooks.trino.trino.auth.KerberosAuthentication"
TRINO_DBAPI_CONNECT = "airflow.providers.trino.hooks.trino.trino.dbapi.connect"
JWT_AUTHENTICATION = "airflow.providers.trino.hooks.trino.trino.auth.JWTAuthentication"
CERT_AUTHENTICATION = "airflow.providers.trino.hooks.trino.trino.auth.CertificateAuthentication"
class TestTrinoHookConn:
    @patch(BASIC_AUTHENTICATION)
    @patch(TRINO_DBAPI_CONNECT)
    @patch(HOOK_GET_CONNECTION)
    def test_get_conn_basic_auth(self, mock_get_connection, mock_connect, mock_basic_auth):
        # A connection with a password should use trino BasicAuthentication.
        self.set_get_connection_return_value(mock_get_connection, password="password")
        TrinoHook().get_conn()
        self.assert_connection_called_with(mock_connect, auth=mock_basic_auth)
        mock_basic_auth.assert_called_once_with("login", "password")
    @patch("airflow.providers.trino.hooks.trino.generate_trino_client_info")
    @patch(BASIC_AUTHENTICATION)
    @patch(TRINO_DBAPI_CONNECT)
    @patch(HOOK_GET_CONNECTION)
    def test_http_headers(
        self,
        mock_get_connection,
        mock_connect,
        mock_basic_auth,
        mocked_generate_airflow_trino_client_info_header,
    ):
        # The hook should forward the Airflow task context as the
        # X-Trino-Client-Info HTTP header on the trino connection.
        mock_get_connection.return_value = Connection(
            login="login", password="password", host="host", schema="hive"
        )
        client = json.dumps(
            {
                "dag_id": "dag-id",
                "execution_date": "2022-01-01T00:00:00",
                "task_id": "task-id",
                "try_number": "1",
                "dag_run_id": "dag-run-id",
                "dag_owner": "dag-owner",
            },
            sort_keys=True,
        )
        http_headers = {"X-Trino-Client-Info": client}

        mocked_generate_airflow_trino_client_info_header.return_value = http_headers["X-Trino-Client-Info"]

        conn = TrinoHook().get_conn()
        self.assert_connection_called_with(mock_connect, auth=mock_basic_auth, http_headers=http_headers)
        mock_basic_auth.assert_called_once_with("login", "password")
        assert mock_connect.return_value == conn
@patch(HOOK_GET_CONNECTION)
def test_get_conn_invalid_auth(self, mock_get_connection):
extras = {"auth": "kerberos"}
self.set_get_connection_return_value(
mock_get_connection,
password="password",
extra=json.dumps(extras),
)
with pytest.raises(
AirflowException, match=re.escape("The 'kerberos' authorization type doesn't support password.")
):
TrinoHook().get_conn()
@patch(JWT_AUTHENTICATION)
@patch(TRINO_DBAPI_CONNECT)
@patch(HOOK_GET_CONNECTION)
def test_get_conn_jwt_auth(self, mock_get_connection, mock_connect, mock_jwt_auth):
extras = {
"auth": "jwt",
"jwt__token": "TEST_JWT_TOKEN",
}
self.set_get_connection_return_value(
mock_get_connection,
extra=json.dumps(extras),
)
TrinoHook().get_conn()
self.assert_connection_called_with(mock_connect, auth=mock_jwt_auth)
@patch(CERT_AUTHENTICATION)
@patch(TRINO_DBAPI_CONNECT)
@patch(HOOK_GET_CONNECTION)
def test_get_conn_cert_auth(self, mock_get_connection, mock_connect, mock_cert_auth):
extras = {
"auth": "certs",
"certs__client_cert_path": "/path/to/client.pem",
"certs__client_key_path": "/path/to/client.key",
}
self.set_get_connection_return_value(
mock_get_connection,
extra=json.dumps(extras),
)
TrinoHook().get_conn()
self.assert_connection_called_with(mock_connect, auth=mock_cert_auth)
mock_cert_auth.assert_called_once_with("/path/to/client.pem", "/path/to/client.key")
@patch(KERBEROS_AUTHENTICATION)
@patch(TRINO_DBAPI_CONNECT)
@patch(HOOK_GET_CONNECTION)
def test_get_conn_kerberos_auth(self, mock_get_connection, mock_connect, mock_auth):
extras = {
"auth": "kerberos",
"kerberos__config": "TEST_KERBEROS_CONFIG",
"kerberos__service_name": "TEST_SERVICE_NAME",
"kerberos__mutual_authentication": "TEST_MUTUAL_AUTHENTICATION",
"kerberos__force_preemptive": True,
"kerberos__hostname_override": "TEST_HOSTNAME_OVERRIDE",
"kerberos__sanitize_mutual_error_response": True,
"kerberos__principal": "TEST_PRINCIPAL",
"kerberos__delegate": "TEST_DELEGATE",
"kerberos__ca_bundle": "TEST_CA_BUNDLE",
"verify": "true",
}
self.set_get_connection_return_value(
mock_get_connection,
extra=json.dumps(extras),
)
TrinoHook().get_conn()
self.assert_connection_called_with(mock_connect, auth=mock_auth)
@patch(HOOK_GET_CONNECTION)
@patch(TRINO_DBAPI_CONNECT)
def test_get_conn_session_properties(self, mock_connect, mock_get_connection):
extras = {
"session_properties": {
"scale_writers": "true",
"task_writer_count": "1",
"writer_min_size": "100MB",
},
}
self.set_get_connection_return_value(mock_get_connection, extra=extras)
TrinoHook().get_conn()
self.assert_connection_called_with(mock_connect, session_properties=extras["session_properties"])
@parameterized.expand(
[
("False", False),
("false", False),
("true", True),
("true", True),
("/tmp/cert.crt", "/tmp/cert.crt"),
]
)
@patch(HOOK_GET_CONNECTION)
@patch(TRINO_DBAPI_CONNECT)
def test_get_conn_verify(self, current_verify, expected_verify, mock_connect, mock_get_connection):
extras = {"verify": current_verify}
self.set_get_connection_return_value(mock_get_connection, extra=json.dumps(extras))
TrinoHook().get_conn()
self.assert_connection_called_with(mock_connect, verify=expected_verify)
@staticmethod
def set_get_connection_return_value(mock_get_connection, extra=None, password=None):
mocked_connection = Connection(
login="login", password=password, host="host", schema="hive", extra=extra or "{}"
)
mock_get_connection.return_value = mocked_connection
@staticmethod
def assert_connection_called_with(
mock_connect, http_headers=mock.ANY, auth=None, verify=True, session_properties=None
):
mock_connect.assert_called_once_with(
catalog="hive",
host="host",
port=None,
http_scheme="http",
http_headers=http_headers,
schema="hive",
source="airflow",
user="login",
isolation_level=IsolationLevel.AUTOCOMMIT,
auth=None if not auth else auth.return_value,
verify=verify,
session_properties=session_properties,
)
class TestTrinoHook(unittest.TestCase):
    """Unit tests for TrinoHook query helpers, backed by a mocked DB-API cursor."""
    def setUp(self):
        """Build a hook subclass whose connection and cursor are MagicMocks."""
        super().setUp()
        self.cur = mock.MagicMock(rowcount=0)
        self.conn = mock.MagicMock()
        self.conn.cursor.return_value = self.cur
        conn = self.conn  # captured by the closure in UnitTestTrinoHook.get_conn
        class UnitTestTrinoHook(TrinoHook):
            # Test-only hook that bypasses Airflow connection resolution.
            conn_name_attr = "test_conn_id"
            def get_conn(self):
                return conn
            def get_isolation_level(self):
                return IsolationLevel.READ_COMMITTED
        self.db_hook = UnitTestTrinoHook()
    @patch("airflow.providers.common.sql.hooks.sql.DbApiHook.insert_rows")
    def test_insert_rows(self, mock_insert_rows):
        """insert_rows should delegate verbatim to DbApiHook.insert_rows."""
        table = "table"
        rows = [("hello",), ("world",)]
        target_fields = None
        commit_every = 10
        replace = True
        self.db_hook.insert_rows(table, rows, target_fields, commit_every, replace)
        mock_insert_rows.assert_called_once_with(table, rows, None, 10, True)
    def test_get_first_record(self):
        """get_first returns the first row and closes cursor and connection."""
        statement = "SQL"
        result_sets = [("row1",), ("row2",)]
        self.cur.fetchone.return_value = result_sets[0]
        assert result_sets[0] == self.db_hook.get_first(statement)
        self.conn.close.assert_called_once_with()
        self.cur.close.assert_called_once_with()
        self.cur.execute.assert_called_once_with(statement)
    def test_get_records(self):
        """get_records returns all rows and closes cursor and connection."""
        statement = "SQL"
        result_sets = [("row1",), ("row2",)]
        self.cur.fetchall.return_value = result_sets
        assert result_sets == self.db_hook.get_records(statement)
        self.conn.close.assert_called_once_with()
        self.cur.close.assert_called_once_with()
        self.cur.execute.assert_called_once_with(statement)
    def test_get_pandas_df(self):
        """get_pandas_df builds a DataFrame from the cursor description and rows."""
        statement = "SQL"
        column = "col"
        result_sets = [("row1",), ("row2",)]
        self.cur.description = [(column,)]
        self.cur.fetchall.return_value = result_sets
        df = self.db_hook.get_pandas_df(statement)
        assert column == df.columns[0]
        assert result_sets[0][0] == df.values.tolist()[0][0]
        assert result_sets[1][0] == df.values.tolist()[1][0]
        self.cur.execute.assert_called_once_with(statement, None)
    @patch("airflow.providers.trino.hooks.trino.TrinoHook.run")
    def test_run(self, mock_run):
        """run should forward sql, autocommit, parameters and handler unchanged."""
        sql = "SELECT 1"
        autocommit = False
        parameters = ("hello", "world")
        handler = list
        self.db_hook.run(sql, autocommit, parameters, list)
        mock_run.assert_called_once_with(sql, autocommit, parameters, handler)
    def test_connection_success(self):
        """test_connection reports success when the probe query works."""
        status, msg = self.db_hook.test_connection()
        assert status is True
        assert msg == "Connection successfully tested"
    @patch("airflow.providers.trino.hooks.trino.TrinoHook.get_conn")
    def test_connection_failure(self, mock_conn):
        """test_connection reports the exception message on failure."""
        mock_conn.side_effect = Exception("Test")
        self.db_hook.get_conn = mock_conn
        status, msg = self.db_hook.test_connection()
        assert status is False
        assert msg == "Test"
class TestTrinoHookIntegration(unittest.TestCase):
    """Integration tests that require a live Trino server (and Kerberos KDC)."""

    @pytest.mark.integration("trino")
    @mock.patch.dict("os.environ", AIRFLOW_CONN_TRINO_DEFAULT="trino://airflow@trino:8080/")
    def test_should_record_records(self):
        """Fetch three customer names over the default (unauthenticated) connection."""
        expected = [["Customer#000000001"], ["Customer#000000002"], ["Customer#000000003"]]
        query = "SELECT name FROM tpch.sf1.customer ORDER BY custkey ASC LIMIT 3"
        fetched = TrinoHook().get_records(query)
        assert expected == fetched

    @pytest.mark.integration("trino")
    @pytest.mark.integration("kerberos")
    def test_should_record_records_with_kerberos_auth(self):
        """Fetch records over an HTTPS connection authenticated via Kerberos."""
        expected = [["Customer#000000001"], ["Customer#000000002"], ["Customer#000000003"]]
        conn_url = (
            "trino://airflow@trino.example.com:7778/?"
            "auth=kerberos&kerberos__service_name=HTTP&"
            "verify=False&"
            "protocol=https"
        )
        with mock.patch.dict("os.environ", AIRFLOW_CONN_TRINO_DEFAULT=conn_url):
            query = "SELECT name FROM tpch.sf1.customer ORDER BY custkey ASC LIMIT 3"
            fetched = TrinoHook().get_records(query)
            assert expected == fetched
| {
"content_hash": "b06fb1d00e1686dff774413c40188e25",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 108,
"avg_line_length": 38.22006472491909,
"alnum_prop": 0.622099915325995,
"repo_name": "nathanielvarona/airflow",
"id": "a30087952089d0985c550fd7bbc1ddc3464151f9",
"size": "12597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/trino/hooks/test_trino.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
} |
"""
Collection of functions related to geographical datasets.
Especially:
* converting coordinates between coordinate systems
* getting the value at a specific point of a dataset
"""
# greatly inspired from
# http://gis.stackexchange.com/questions/6669/converting-projected-geotiff-to-wgs84-with-gdal-and-python
# and http://gis.stackexchange.com/questions/29632/raster-how-to-get-elevation-at-lat-long-using-python
import logging
import os
import numpy as np
from osgeo import osr
LOGGER = logging.getLogger(os.path.basename(__file__))
def transform_from_wgs84(projection_ref, wgs84_lat, wgs84_long):
    """
    Project WGS 84 (GPS) coordinates into the coordinate system given as WKT.

    Careful with the axis swap: in the resulting Cartesian system longitude
    maps to x and latitude maps to y.
    :param projection_ref: target coordinate system in Well Known Text (WKT) format
    :param wgs84_lat: the WGS 84 latitude
    :param wgs84_long: the WGS 84 longitude
    :return: the couple of transformed coordinates (x, y)
    """
    # Target coordinate system, parsed from the supplied WKT.
    target_cs = osr.SpatialReference()
    target_cs.ImportFromWkt(projection_ref)
    # Source coordinate system: WGS 84 is EPSG:4326 ('GPS' coordinates).
    source_cs = osr.SpatialReference()
    source_cs.ImportFromEPSG(4326)
    # Vectorize so scalar and array inputs are both supported.
    transform = osr.CoordinateTransformation(source_cs, target_cs)
    project = np.vectorize(transform.TransformPoint)
    projected = project(wgs84_long, wgs84_lat)
    return projected[0], projected[1]
def compute_offset(transform, ds_x, ds_y):
    """
    Invert an affine geo-transform to get integer pixel offsets.

    The offsets are the floored result of the inverse transformation; results
    are valid as long as the transform is linear (transform[2] and
    transform[4] are 0 — currently not enforced, see TODO below).
    :param transform: the transformation obtained from Dataset::GetGeoTransform
    :param ds_x: the projected x-coordinate (scalar or array)
    :param ds_y: the projected y-coordinate (scalar or array)
    :return: the couple of offsets (x, y)
    """
    # TODO is this exception really useful?
    if transform is None:
        raise Exception("Can only handle 'Affine GeoTransforms'")
    # TODO transform[2] and transform[4] should be checked as equal to 0
    # http://www.gdal.org/classGDALDataset.html#af9593cc241e7d140f5f3c4798a43a668
    x_origin, pixel_width = transform[0], transform[1]
    y_origin, pixel_height = transform[3], transform[5]
    # Inverse geo-transform, floored to int.
    # TODO better approximation than flooring to the int ?
    col = np.floor_divide(ds_x - x_origin, pixel_width).astype(int)
    row = np.floor_divide(ds_y - y_origin, pixel_height).astype(int)
    return col, row
def read_band_data(band, no_data, offset_x, offset_y):
    """
    Read a single value from a band, replacing "NoData" with None.

    :param band: the band to read data from
    :param no_data: the "no data" value for this band (may be None if unset)
    :param offset_x: the x offset to read data
    :param offset_y: the y offset to read data
    :return: the value, or None if it equals the band's "no data" value
    """
    value = band.ReadAsArray(offset_x, offset_y, 1, 1)[0, 0]
    # FIX: compare by value. The original used an identity test (``is not``)
    # which virtually never matched the numpy scalar read from the band, so
    # "no data" cells leaked through as regular values.
    if no_data is not None and value == no_data:
        return None
    return value
# FIX: exclude band/no_data by *position* — read_ds_data passes them
# positionally and string names in ``excluded`` only cover keyword arguments.
# otypes=[object] lets None results survive the output cast.
vectorized_read_band_data = np.vectorize(read_band_data, excluded={0, 1}, otypes=[object])
def read_ds_data(data_source, offset_x, offset_y):
    """
    Read data from the given data source at the given pixel offsets.

    :param data_source: the data source to read data from
    :param offset_x: the x offset (scalar or array)
    :param offset_y: the y offset (scalar or array)
    :return: the value(s)
    """
    # Data is expected in the first band (GDAL band indices are 1-based).
    band = data_source.GetRasterBand(1)
    no_data = band.GetNoDataValue()
    LOGGER.debug("for this band, no data is: %s", no_data)
    # Scalar offsets take the direct path; array offsets the vectorized one.
    both_scalar = np.isscalar(offset_x) and np.isscalar(offset_y)
    reader = read_band_data if both_scalar else vectorized_read_band_data
    return reader(band, no_data, offset_x, offset_y)
def read_ds_value_from_wgs84(data_source, wgs84_lat, wgs84_long):
    """
    Read the dataset value at the specified WGS 84 (GPS) coordinates.

    Chains projection -> pixel offset -> band read.
    :param data_source: the dataset to read the value in
    :param wgs84_lat: the WGS 84 latitude
    :param wgs84_long: the WGS 84 longitude
    :return: the value or None if the specified coordinate is a "no data"
    """
    proj_x, proj_y = transform_from_wgs84(data_source.GetProjectionRef(), wgs84_lat, wgs84_long)
    LOGGER.debug("projected x: %f, projected y: %f", proj_x, proj_y)
    col, row = compute_offset(data_source.GetGeoTransform(), proj_x, proj_y)
    LOGGER.debug("offset x: %d, offset y: %d", col, row)
    return read_ds_data(data_source, col, row)
| {
"content_hash": "0c437d23871a722c84fea16c5a692228",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 112,
"avg_line_length": 36.6,
"alnum_prop": 0.7051131928181108,
"repo_name": "superbob/YunoSeeMe",
"id": "ea2582e7e7dc3e7ff0e8d70d4e7f77ff73699308",
"size": "5124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geods.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "43399"
}
],
"symlink_target": ""
} |
from setuptools import setup

# Packaging metadata for django-jux (JUnit-style XML output for Django tests).
# FIX: read the long description through a context manager so the file handle
# is closed promptly (the original left an unclosed file object behind).
with open('README.rst') as readme:
    long_description = readme.read()

setup(
    name='django-jux',
    version='1.1.2',
    description='JUnit-style XML output for Django tests',
    author='Sean Myers',
    author_email='sean.dst@gmail.com',
    url='https://bitbucket.org/seandst/django-jux',
    packages=['juxd'],
    license='MIT Expat License',
    long_description=long_description,
    install_requires=[
        'Django >= 1.6',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: DFSG approved',
        'License :: Freely Distributable',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Testing',
    ]
)
| {
"content_hash": "1b05184f978e1e4ca5a7058289e16000",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 58,
"avg_line_length": 32.6551724137931,
"alnum_prop": 0.5987328405491025,
"repo_name": "RideCo/django-jux",
"id": "737aa2d900d50924e1145a836a80b0c7c025e06e",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6042"
}
],
"symlink_target": ""
} |
import arff
import argparse
import ConfigSpace
import copy
import fanova.fanova
import fanova.visualizer
import json
import matplotlib.cm
import matplotlib.pyplot as plt
import numpy as np
import logging
import openmlcontrib
import openmlpimp.configspaces
import os
import sklearnbot
import typing
# to plot: <openml_pimp_root>/examples/plot/plot_fanova_aggregates.py
def read_cmd():
    """Parse the command-line options for the fANOVA marginal-plot script.

    Unknown arguments are tolerated (``parse_known_args``) and discarded.
    :return: the parsed argparse namespace
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--dataset_path', default='../../DS2019/data/resnet.arff', type=str)
    cli.add_argument('--output_directory', default=os.path.expanduser('~/experiments/openml-pimp'), type=str)
    cli.add_argument('--classifier', default='resnet', type=str)
    cli.add_argument('--config_library', default='openmlpimp', type=str)
    cli.add_argument('--measure', default='predictive_accuracy', type=str)
    cli.add_argument('--plot_marginals', action='store_true', default=True)
    cli.add_argument('--plot_extension', default='pdf', type=str)
    cli.add_argument('--plot_resolution', default=100, type=int)
    # Single names give 1D marginals; 'a__b' pairs give pairwise marginals.
    cli.add_argument('--hyperparameters', nargs='+', default=[
        'epochs',
        'momentum',
        'learning_rate_init',
        'weight_decay',
        'epochs__learning_rate_init',
        'epochs__weight_decay',
        'learning_rate_init__momentum',
    ])
    cli.add_argument('--n_trees', default=16, type=int)
    cli.add_argument('--resolution', default=100, type=int)
    cli.add_argument('--task_id', default=None, type=str)
    cli.add_argument('--task_id_column', default='dataset', type=str)
    cli.add_argument('--show_legend', action='store_true')
    cli.add_argument('--tick_size', default=12, type=int)
    cli.add_argument('--label_size', default=14, type=int)
    cli.add_argument('--subsample', default=None, type=int)
    namespace, _ = cli.parse_known_args()
    return namespace
def apply_logscale(X: np.array, config_space: ConfigSpace.ConfigurationSpace):
    """Return copies of (X, config_space) with log-scale numeric axes linearized.

    Each numerical hyperparameter flagged as log-scale has its column in X and
    its bounds replaced by their natural logarithm, and its log flag cleared.
    The inputs are not modified.
    """
    X_log = np.array(X)
    cs_log = copy.deepcopy(config_space)
    for col, param in enumerate(cs_log.get_hyperparameters()):
        if isinstance(param, ConfigSpace.hyperparameters.NumericalHyperparameter) and param.log:
            X_log[:, col] = np.log(X_log[:, col])
            param.lower = np.log(param.lower)
            param.upper = np.log(param.upper)
            param.log = False
    # Sanity check: every numerical column must lie within the (new) bounds
    # and no log flag may remain set.
    for col, param in enumerate(cs_log.get_hyperparameters()):
        if isinstance(param, ConfigSpace.hyperparameters.NumericalHyperparameter):
            assert param.lower <= np.min(X_log[:, col]) <= np.max(X_log[:, col]) <= param.upper
            assert param.log is False
    return X_log, cs_log
def plot_single_marginal(X: np.array,
                         y: np.array,
                         config_space: ConfigSpace.ConfigurationSpace,
                         name_prefix: str,
                         hyperparameter_name: str,
                         directory: str,
                         y_range: typing.Optional[typing.Tuple[int, int]],
                         measure_name: str,
                         n_trees: int,
                         resolution: int,
                         tick_size: int,
                         label_size: int,
                         show_legend: bool,
                         plot_extension: str):
    """Fit a fANOVA forest on (X, y) and save the 1D marginal plot of one
    hyperparameter to ``directory/<name_prefix>__<hyperparameter>.<ext>``.

    If ``y_range`` is given the y-axis is clamped to it; otherwise matplotlib
    auto-scales.
    """
    evaluator = fanova.fanova.fANOVA(X=X, Y=y, config_space=config_space, n_trees=n_trees)
    visualizer = fanova.visualizer.Visualizer(evaluator, config_space, '/tmp/', y_label=measure_name)
    # Reset global pyplot state before drawing into it.
    plt.close('all')
    plt.clf()
    plt.rc('xtick', labelsize=tick_size)
    plt.rc('ytick', labelsize=tick_size)
    plt.rc('axes', labelsize=label_size)
    hyperparameter_idx = config_space.get_idx_by_hyperparameter_name(hyperparameter_name)
    os.makedirs(directory, exist_ok=True)
    # os.sep is stripped from the hyperparameter name so it can't escape the directory.
    outfile_name = os.path.join(directory, '%s__%s.%s' % (name_prefix,
                                                          hyperparameter_name.replace(os.sep, "_"),
                                                          plot_extension))
    visualizer.plot_marginal(hyperparameter_idx, resolution=resolution, show=False)
    # Keep the auto-computed x-limits, optionally override the y-limits.
    x1, x2, _, _ = plt.axis()
    if y_range:
        plt.axis((x1, x2, y_range[0], y_range[1]))
    ax = plt.gca()
    ax.set_xlabel(hyperparameter_name.replace('_', ' ').capitalize())
    ax.set_ylabel(measure_name.replace('_', ' ').capitalize())
    if not show_legend and ax.get_legend() is not None:
        ax.get_legend().remove()
    plt.tight_layout()
    plt.savefig(outfile_name)
    logging.info('saved marginal plot to: %s' % outfile_name)
def plot_pairwise_marginal(X: np.array,
                           y: np.array,
                           config_space: ConfigSpace.ConfigurationSpace,
                           name_prefix: str,
                           hyperparameter_names: typing.Tuple[str],
                           directory: str,
                           z_range: typing.Optional[typing.Tuple[int, int]],
                           measure_name: str,
                           n_trees: int,
                           resolution: int,
                           tick_size: int,
                           label_size: int,
                           show_legend: bool,
                           plot_extension: str):
    """Fit a fANOVA forest and save the pairwise (3D) marginal plot of two
    hyperparameters, once per axis orientation, to ``directory``.

    Log-scale hyperparameters are linearized first via ``apply_logscale``;
    the axis labels mark them with "(log)". Raises ValueError if
    ``hyperparameter_names`` does not contain exactly two names.
    """
    # fANOVA works on linear axes, so log-scale columns are transformed up front.
    X_prime, config_space_prime = apply_logscale(X, config_space)
    evaluator = fanova.fanova.fANOVA(X=X_prime, Y=y, config_space=config_space_prime, n_trees=n_trees)
    visualizer = fanova.visualizer.Visualizer(evaluator, config_space_prime, '/tmp/', y_label=measure_name)
    # Reset global pyplot state before drawing into it.
    plt.close('all')
    plt.clf()
    plt.rc('xtick', labelsize=tick_size)
    plt.rc('ytick', labelsize=tick_size)
    plt.rc('axes', labelsize=label_size)
    if len(hyperparameter_names) != 2:
        raise ValueError()
    idx1 = config_space.get_idx_by_hyperparameter_name(hyperparameter_names[0])
    idx2 = config_space.get_idx_by_hyperparameter_name(hyperparameter_names[1])
    # Render both orientations: (hp1, hp2) and (hp2, hp1).
    indices = [(idx1, idx2), (idx2, idx1)]
    for hp1_hp2 in indices:
        hp1_name = config_space_prime.get_hyperparameter_by_idx(hp1_hp2[0])
        hp2_name = config_space_prime.get_hyperparameter_by_idx(hp1_hp2[1])
        os.makedirs(directory, exist_ok=True)
        outfile_name = os.path.join(directory, '%s__%s__%s.%s' % (name_prefix,
                                                                  hp1_name.replace(os.sep, "_"),
                                                                  hp2_name.replace(os.sep, "_"),
                                                                  plot_extension))
        try:
            visualizer.plot_pairwise_marginal(hp1_hp2, resolution=resolution, show=False,
                                              colormap=matplotlib.cm.viridis, add_colorbar=False)
            ax = plt.gca()
            if z_range:
                ax.set_zlim3d(z_range[0], z_range[1])
            # note that we use original config space
            xlabel_log_str = ' (log)' if config_space.get_hyperparameter(hp1_name).log else ''
            ylabel_log_str = ' (log)' if config_space.get_hyperparameter(hp2_name).log else ''
            ax.set_xlabel(hp1_name.replace('_', ' ').capitalize() + xlabel_log_str)
            ax.set_ylabel(hp2_name.replace('_', ' ').capitalize() + ylabel_log_str)
            ax.set_zlabel(measure_name.replace('_', ' ').capitalize())
            if not show_legend and ax.get_legend() is not None:
                ax.get_legend().remove()
            plt.savefig(outfile_name, bbox_inches='tight')
            logging.info('saved marginal plot (3D) to: %s' % outfile_name)
        except IndexError as e:
            # fanova occasionally fails on degenerate pairs; log and continue.
            logging.warning('IndexError with hyperparameters %s and %s: %s' % (hp1_name, hp2_name, e))
def get_dataset_metadata(dataset_path):
    """Load the JSON meta-data embedded in the first comment line of an arff file.

    Raises ValueError when the file does not start with a '%' comment.
    """
    with open(dataset_path) as handle:
        header = handle.readline()
    if not header.startswith('%'):
        raise ValueError('arff data file should start with comment for meta-data')
    return json.loads(header[1:])
def run(args):
    """Generate fANOVA marginal plots per task from a meta-dataset.

    Loads the arff meta-dataset from ``args.dataset_path``, validates it
    against the classifier's configuration space, then for every task id
    (or only ``args.task_id`` when given) renders one plot per entry in
    ``args.hyperparameters``: a single name yields a 1D marginal, an
    ``a__b`` pair a pairwise 3D marginal. Plots land in
    ``args.output_directory/marginal_plots``.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    logging.info('Start %s: %s' % (os.path.basename(__file__), vars(args)))
    # Use STIX fonts so math text renders consistently in the figures.
    matplotlib.rcParams['mathtext.fontset'] = 'stix'
    matplotlib.rcParams['font.family'] = 'STIXGeneral'
    with open(args.dataset_path, 'r') as fp:
        arff_dataset = arff.load(fp)
    # The configuration space may come from either supported library.
    if args.config_library == 'sklearnbot':
        config_space = sklearnbot.config_spaces.get_config_space(args.classifier, None)
    elif args.config_library == 'openmlpimp':
        config_space = openmlpimp.configspaces.get_config_space(args.classifier, None)
    else:
        raise ValueError('Could not identify config library: %s' % args.config_library)
    data = openmlcontrib.meta.arff_to_dataframe(arff_dataset, config_space)
    data = openmlcontrib.meta.integer_encode_dataframe(data, config_space)
    meta_data = get_dataset_metadata(args.dataset_path)
    if args.measure not in data.columns.values:
        raise ValueError('Could not find measure in dataset: %s' % args.measure)
    # The dataset's hyperparameter columns must match the config space exactly.
    if set(config_space.get_hyperparameter_names()) != set(meta_data['col_parameters']):
        missing_cs = set(meta_data['col_parameters']) - set(config_space.get_hyperparameter_names())
        missing_ds = set(config_space.get_hyperparameter_names()) - set(meta_data['col_parameters'])
        raise ValueError('ConfigSpace and hyperparameters of dataset do not '
                         'align. ConfigSpace misses: %s, dataset misses: %s' % (missing_cs, missing_ds))
    task_ids = data[args.task_id_column].unique()
    if args.task_id:
        task_ids = [args.task_id]
    for t_idx, task_id in enumerate(task_ids):
        logging.info('Running fanova on task %s (%d/%d)' % (task_id, t_idx + 1, len(task_ids)))
        # Restrict to this task's rows and drop the task id column.
        data_task = data[data[args.task_id_column] == task_id]
        del data_task[args.task_id_column]
        # now dataset is gone, and all categoricals are converted, we can convert to float
        data_task = data_task.astype(np.float)
        # Optional uniform subsample to speed up the forest fit.
        if args.subsample:
            indices = np.random.choice(len(data_task), args.subsample, replace=False)
            data_task = data_task.iloc[indices]
        logging.info('Dimensions: %s (out of (%s)) %s' % (str(data_task.shape),
                                                          str(data.shape),
                                                          '[Subsampled]' if args.subsample else ''))
        # Guard against tasks with too few observations for a meaningful fit.
        assert len(data_task) >= min(100, args.subsample if args.subsample is not None else 100)
        os.makedirs(args.output_directory, exist_ok=True)
        X_data = data_task[config_space.get_hyperparameter_names()].values
        y_data = data_task[args.measure].values
        for hyperparameters_str in args.hyperparameters:
            # 'a__b' denotes a pairwise marginal, a single name a 1D marginal.
            hyperparameters = hyperparameters_str.split('__')
            logging.info('-- Starting with: %s' % hyperparameters)
            if len(hyperparameters) == 1:
                plot_single_marginal(
                    X=X_data, y=y_data,
                    config_space=config_space,
                    name_prefix=task_id,
                    hyperparameter_name=hyperparameters[0],
                    directory=os.path.join(args.output_directory, 'marginal_plots'),
                    y_range=None,
                    measure_name=args.measure,
                    n_trees=args.n_trees,
                    resolution=args.plot_resolution,
                    tick_size=args.tick_size,
                    label_size=args.label_size,
                    show_legend=args.show_legend,
                    plot_extension=args.plot_extension,
                )
            elif len(hyperparameters) == 2:
                plot_pairwise_marginal(
                    X=X_data,
                    y=y_data,
                    config_space=config_space,
                    name_prefix=task_id,
                    hyperparameter_names=hyperparameters,
                    directory=os.path.join(args.output_directory, 'marginal_plots'),
                    z_range=None,
                    measure_name=args.measure,
                    n_trees=args.n_trees,
                    resolution=args.plot_resolution,
                    tick_size=args.tick_size,
                    label_size=args.label_size,
                    show_legend=args.show_legend,
                    plot_extension=args.plot_extension,
                )
            else:
                raise ValueError('No support yet for higher dimensions than 2. Got: %d' % len(hyperparameters))
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and generate the marginal plots.
    run(read_cmd())
| {
"content_hash": "a9c0cd42b8fb5895434138d3f6d81f41",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 112,
"avg_line_length": 47.11439114391144,
"alnum_prop": 0.5762061403508771,
"repo_name": "janvanrijn/openml-pimp",
"id": "75edd088d5249e93011503fa8fb274b844240152",
"size": "12768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/plot/plot_fanova_marginals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "33"
},
{
"name": "Jupyter Notebook",
"bytes": "15643"
},
{
"name": "Python",
"bytes": "84680"
}
],
"symlink_target": ""
} |
"""Tests for model sparsity handling / switching."""
import pytest
import numpy as np
from parameters import T_VALUES, KPT
import tbmodels
@pytest.mark.parametrize("t1", T_VALUES)
def test_simple(t1, get_model):
    """
    Check that a simple model set up as sparse and dense creates
    the same Hamiltonians.
    """
    # NOTE(review): the original names were swapped relative to the ``sparse=``
    # flag passed; the comparison is symmetric so behavior is unchanged, but the
    # names now match what is actually constructed.
    model_sparse = get_model(*t1, sparse=True)
    model_dense = get_model(*t1, sparse=False)
    for kpt in KPT:
        assert np.isclose(model_sparse.hamilton(kpt), model_dense.hamilton(kpt)).all()
@pytest.mark.parametrize("t1", T_VALUES)
def test_change_to_dense(t1, get_model, models_close):
    """
    Check that creating a sparse model and then switching it to dense
    creates the same result as directly creating a dense model.
    """
    switched = get_model(*t1, sparse=True)
    reference = get_model(*t1, sparse=False)
    switched.set_sparse(False)
    assert models_close(switched, reference)
@pytest.mark.parametrize("t1", T_VALUES)
def test_change_to_sparse(t1, get_model, models_close):
    """
    Check that creating a dense model and then switching it to sparse
    creates the same result as directly creating a sparse model.
    """
    reference = get_model(*t1, sparse=True)
    switched = get_model(*t1, sparse=False)
    switched.set_sparse(True)
    assert models_close(reference, switched)
@pytest.mark.parametrize(
    "hr_name", ["hr_hamilton.dat", "wannier90_hr.dat", "wannier90_hr_v2.dat"]
)
def test_hr(hr_name, sample):
    """
    Check that models loaded from *_hr.dat format have the same Hamiltonians
    when loaded as either sparse or dense models.
    """
    path = sample(hr_name)
    dense_model = tbmodels.Model.from_wannier_files(hr_file=path, occ=28, sparse=False)
    sparse_model = tbmodels.Model.from_wannier_files(hr_file=path, occ=28, sparse=True)
    for kpt in KPT:
        assert np.isclose(dense_model.hamilton(kpt), sparse_model.hamilton(kpt)).all()
| {
"content_hash": "134edc65d8363c1eb393ee2dddc04f9d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 85,
"avg_line_length": 31.065573770491802,
"alnum_prop": 0.6849604221635884,
"repo_name": "Z2PackDev/TBmodels",
"id": "8878e753fccfd7cb8094529823c857f5266df809",
"size": "2023",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "tests/test_sparse_dense.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "206314"
},
{
"name": "Shell",
"bytes": "803"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop ``problem_desc`` from Problem and make its ``tags`` field optional (blank)."""
    dependencies = [
        ('problem', '0003_auto_20170429_1802'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='problem',
            name='problem_desc',
        ),
        migrations.AlterField(
            model_name='problem',
            name='tags',
            field=models.ManyToManyField(related_name='problems', to='problem.ProblemTag', blank=True),
        ),
    ]
| {
"content_hash": "31ce14971e8d3288e60300555c3c8010",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 103,
"avg_line_length": 24.545454545454547,
"alnum_prop": 0.5851851851851851,
"repo_name": "io07/BOJ-V4",
"id": "7c770a7d8b63d2b50eb5422b4ea9a7d573534a44",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem/migrations/0004_auto_20170502_2317.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1937"
},
{
"name": "CSS",
"bytes": "1456184"
},
{
"name": "HTML",
"bytes": "114461"
},
{
"name": "JavaScript",
"bytes": "9818915"
},
{
"name": "PHP",
"bytes": "15921"
},
{
"name": "Python",
"bytes": "210352"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from ..base import ServerFrame
class Receipt(ServerFrame):
    """Abstract base for server RECEIPT frames; subclasses pin a STOMP version."""
    abstract = True
class Receipt10(Receipt):
    """RECEIPT frame for STOMP 1.0; the ``receipt-id`` header is mandatory."""
    version = '1.0'
    verb = 'RECEIPT'
    headers_required = ('receipt-id', )
class Receipt11(Receipt10):
    """RECEIPT frame for STOMP 1.1 (same shape as 1.0)."""
    version = '1.1'
class Receipt12(Receipt11):
    """RECEIPT frame for STOMP 1.2 (same shape as 1.1)."""
    version = '1.2'
| {
"content_hash": "a5342d4ae30ddba54cceedca8560cc6e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 39,
"avg_line_length": 19.12,
"alnum_prop": 0.694560669456067,
"repo_name": "skippyprime/stimpi",
"id": "8e4f4319e2ed93ba7dbaca33da35f9698aa15f12",
"size": "1058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stimpi/frames/impl/receipt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "107556"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the ``item`` table for the pharmacy catalogue."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='item',
            fields=[
                ('item_id', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('item_name', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='')),
                ('otc_or_not', models.BooleanField()),
                ('brand_name', models.CharField(max_length=50)),
                ('salts', models.TextField()),
                ('specifications', models.CharField(max_length=100)),
                ('category', models.CharField(max_length=30)),
            ],
        ),
    ]
| {
"content_hash": "d44c06cb68174294f5ec299dc9d9d6d1",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 96,
"avg_line_length": 30.444444444444443,
"alnum_prop": 0.5413625304136253,
"repo_name": "mpiplani/Online-Pharmacy",
"id": "d9f1e20005370a1fb71438c09656f8afd24d43a8",
"size": "893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "online_pharmacy/online_pharmacy/items/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3237"
},
{
"name": "Python",
"bytes": "51456"
}
],
"symlink_target": ""
} |
"""Auto-generated file, do not edit by hand. KN metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Numbering-plan metadata for region "KN" (NANP member: country code 1,
# national prefix 1, area code 869). Auto-generated — regenerate upstream
# rather than editing the patterns by hand.
PHONE_METADATA_KN = PhoneMetadata(id='KN', country_code=1, international_prefix='011',
    general_desc=PhoneNumberDesc(national_number_pattern='[589]\\d{9}', possible_number_pattern='\\d{7}(?:\\d{3})?', possible_length=(10,), possible_length_local_only=(7,)),
    fixed_line=PhoneNumberDesc(national_number_pattern='869(?:2(?:29|36)|302|4(?:6[015-9]|70))\\d{4}', example_number='8692361234', possible_length=(10,), possible_length_local_only=(7,)),
    mobile=PhoneNumberDesc(national_number_pattern='869(?:5(?:5[6-8]|6[5-7])|66\\d|76[02-7])\\d{4}', example_number='8697652917', possible_length=(10,), possible_length_local_only=(7,)),
    toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='8002123456', possible_length=(10,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='9002123456', possible_length=(10,)),
    shared_cost=PhoneNumberDesc(),
    personal_number=PhoneNumberDesc(national_number_pattern='5(?:00|22|33|44|66|77|88)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='5002345678', possible_length=(10,)),
    voip=PhoneNumberDesc(),
    pager=PhoneNumberDesc(),
    uan=PhoneNumberDesc(),
    voicemail=PhoneNumberDesc(),
    no_international_dialling=PhoneNumberDesc(),
    national_prefix='1',
    national_prefix_for_parsing='1',
    leading_digits='869')
| {
"content_hash": "759ac8a71a742cb8de1884a2562ddea5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 188,
"avg_line_length": 83.84210526315789,
"alnum_prop": 0.7011927181418707,
"repo_name": "vicky2135/lucious",
"id": "8932b97f685cd16039ba873a2d319e96cb3f191f",
"size": "1593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/phonenumbers/data/region_KN.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896683"
},
{
"name": "C++",
"bytes": "52230"
},
{
"name": "CSS",
"bytes": "1169533"
},
{
"name": "HTML",
"bytes": "1104983"
},
{
"name": "JavaScript",
"bytes": "1055140"
},
{
"name": "Makefile",
"bytes": "145238"
},
{
"name": "Python",
"bytes": "55993261"
},
{
"name": "Shell",
"bytes": "40487"
}
],
"symlink_target": ""
} |
import functools
import time
import unittest
from concurrent import futures
from mock import mock
from mock import patch
from oslo_serialization import jsonutils
import pika
import oslo_messaging
from oslo_messaging._drivers.pika_driver import pika_engine
from oslo_messaging._drivers.pika_driver import pika_message as pika_drv_msg
class PikaIncomingMessageTestCase(unittest.TestCase):
    """Tests for ``PikaIncomingMessage``: body parsing plus ack/requeue behaviour."""
    def setUp(self):
        # Mocked engine/channel and one canned AMQP delivery; the JSON body
        # mixes a context key (``_$_`` prefix) with a payload key.
        self._pika_engine = mock.Mock()
        self._channel = mock.Mock()
        self._delivery_tag = 12345
        self._method = pika.spec.Basic.Deliver(delivery_tag=self._delivery_tag)
        self._properties = pika.BasicProperties(
            content_type="application/json",
            headers={"version": "1.0"},
        )
        self._body = (
            b'{"_$_key_context":"context_value",'
            b'"payload_key": "payload_value"}'
        )
    def test_message_body_parsing(self):
        """``_$_``-prefixed keys must land in ctxt, the rest in message."""
        message = pika_drv_msg.PikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            self._body
        )
        self.assertEqual(message.ctxt.get("key_context", None),
                         "context_value")
        self.assertEqual(message.message.get("payload_key", None),
                         "payload_value")
    def test_message_acknowledge(self):
        """acknowledge() issues exactly one basic_ack with the delivery tag."""
        message = pika_drv_msg.PikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            self._body
        )
        message.acknowledge()
        self.assertEqual(1, self._channel.basic_ack.call_count)
        self.assertEqual({"delivery_tag": self._delivery_tag},
                         self._channel.basic_ack.call_args[1])
    def test_message_acknowledge_no_ack(self):
        """With no channel (no-ack mode) acknowledge() must be a no-op."""
        message = pika_drv_msg.PikaIncomingMessage(
            self._pika_engine, None, self._method, self._properties,
            self._body
        )
        message.acknowledge()
        self.assertEqual(0, self._channel.basic_ack.call_count)
    def test_message_requeue(self):
        """requeue() issues one basic_nack with requeue=True and the tag."""
        message = pika_drv_msg.PikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            self._body
        )
        message.requeue()
        self.assertEqual(1, self._channel.basic_nack.call_count)
        self.assertEqual({"delivery_tag": self._delivery_tag, 'requeue': True},
                         self._channel.basic_nack.call_args[1])
    def test_message_requeue_no_ack(self):
        """With no channel (no-ack mode) requeue() must be a no-op."""
        message = pika_drv_msg.PikaIncomingMessage(
            self._pika_engine, None, self._method, self._properties,
            self._body
        )
        message.requeue()
        self.assertEqual(0, self._channel.basic_nack.call_count)
class RpcPikaIncomingMessageTestCase(unittest.TestCase):
    """Tests for ``RpcPikaIncomingMessage``: call/cast parsing and reply paths."""
    def setUp(self):
        # Engine mock also carries the reply-retry policy consumed by reply().
        self._pika_engine = mock.Mock()
        self._pika_engine.rpc_reply_retry_attempts = 3
        self._pika_engine.rpc_reply_retry_delay = 0.25
        self._channel = mock.Mock()
        self._delivery_tag = 12345
        self._method = pika.spec.Basic.Deliver(delivery_tag=self._delivery_tag)
        self._body = (
            b'{"_$_key_context":"context_value",'
            b'"payload_key":"payload_value"}'
        )
        self._properties = pika.BasicProperties(
            content_type="application/json",
            content_encoding="utf-8",
            headers={"version": "1.0"},
        )
    def test_call_message_body_parsing(self):
        """A call message exposes correlation_id as msg_id and reply_to as reply_q."""
        self._properties.correlation_id = 123456789
        self._properties.reply_to = "reply_queue"
        message = pika_drv_msg.RpcPikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            self._body
        )
        self.assertEqual(message.ctxt.get("key_context", None),
                         "context_value")
        self.assertEqual(message.msg_id, 123456789)
        self.assertEqual(message.reply_q, "reply_queue")
        self.assertEqual(message.message.get("payload_key", None),
                         "payload_value")
    def test_cast_message_body_parsing(self):
        """A cast message (no correlation_id/reply_to) parses with None ids."""
        message = pika_drv_msg.RpcPikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            self._body
        )
        self.assertEqual(message.ctxt.get("key_context", None),
                         "context_value")
        self.assertEqual(message.msg_id, None)
        self.assertEqual(message.reply_q, None)
        self.assertEqual(message.message.get("payload_key", None),
                         "payload_value")
    @patch(("oslo_messaging._drivers.pika_driver.pika_message."
            "PikaOutgoingMessage.send"))
    def test_reply_for_cast_message(self, send_reply_mock):
        """reply() on a cast message must not send anything."""
        message = pika_drv_msg.RpcPikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            self._body
        )
        self.assertEqual(message.ctxt.get("key_context", None),
                         "context_value")
        self.assertEqual(message.msg_id, None)
        self.assertEqual(message.reply_q, None)
        self.assertEqual(message.message.get("payload_key", None),
                         "payload_value")
        message.reply(reply=object())
        self.assertEqual(send_reply_mock.call_count, 0)
    @patch("oslo_messaging._drivers.pika_driver.pika_message."
           "RpcReplyPikaOutgoingMessage")
    @patch("retrying.retry")
    def test_positive_reply_for_call_message(self,
                                             retry_mock,
                                             outgoing_message_mock):
        """A successful reply builds an outgoing reply message and a retrier
        configured from the engine's retry policy (3 attempts, 250 ms)."""
        self._properties.correlation_id = 123456789
        self._properties.reply_to = "reply_queue"
        message = pika_drv_msg.RpcPikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            self._body
        )
        self.assertEqual(message.ctxt.get("key_context", None),
                         "context_value")
        self.assertEqual(message.msg_id, 123456789)
        self.assertEqual(message.reply_q, "reply_queue")
        self.assertEqual(message.message.get("payload_key", None),
                         "payload_value")
        reply = "all_fine"
        message.reply(reply=reply)
        outgoing_message_mock.assert_called_once_with(
            self._pika_engine, 123456789, failure_info=None, reply='all_fine',
            content_encoding='utf-8', content_type='application/json'
        )
        outgoing_message_mock().send.assert_called_once_with(
            expiration_time=None, reply_q='reply_queue', retrier=mock.ANY
        )
        retry_mock.assert_called_once_with(
            retry_on_exception=mock.ANY, stop_max_attempt_number=3,
            wait_fixed=250.0
        )
    @patch("oslo_messaging._drivers.pika_driver.pika_message."
           "RpcReplyPikaOutgoingMessage")
    @patch("retrying.retry")
    def test_negative_reply_for_call_message(self,
                                             retry_mock,
                                             outgoing_message_mock):
        """A failure reply forwards failure_info instead of a result."""
        self._properties.correlation_id = 123456789
        self._properties.reply_to = "reply_queue"
        message = pika_drv_msg.RpcPikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            self._body
        )
        self.assertEqual(message.ctxt.get("key_context", None),
                         "context_value")
        self.assertEqual(message.msg_id, 123456789)
        self.assertEqual(message.reply_q, "reply_queue")
        self.assertEqual(message.message.get("payload_key", None),
                         "payload_value")
        failure_info = object()
        message.reply(failure=failure_info)
        outgoing_message_mock.assert_called_once_with(
            self._pika_engine, 123456789,
            failure_info=failure_info,
            reply=None,
            content_encoding='utf-8',
            content_type='application/json'
        )
        outgoing_message_mock().send.assert_called_once_with(
            expiration_time=None, reply_q='reply_queue', retrier=mock.ANY
        )
        retry_mock.assert_called_once_with(
            retry_on_exception=mock.ANY, stop_max_attempt_number=3,
            wait_fixed=250.0
        )
class RpcReplyPikaIncomingMessageTestCase(unittest.TestCase):
    """Tests for ``RpcReplyPikaIncomingMessage``: success and failure payloads."""
    def setUp(self):
        # The engine must allow deserializing exceptions from these modules.
        self._pika_engine = mock.Mock()
        self._pika_engine.allowed_remote_exmods = [
            pika_engine._EXCEPTIONS_MODULE, "oslo_messaging.exceptions"
        ]
        self._channel = mock.Mock()
        self._delivery_tag = 12345
        self._method = pika.spec.Basic.Deliver(delivery_tag=self._delivery_tag)
        self._properties = pika.BasicProperties(
            content_type="application/json",
            content_encoding="utf-8",
            headers={"version": "1.0"},
            correlation_id=123456789
        )
    def test_positive_reply_message_body_parsing(self):
        """A body with key "s" is parsed as a successful result."""
        body = b'{"s": "all fine"}'
        message = pika_drv_msg.RpcReplyPikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            body
        )
        self.assertEqual(message.msg_id, 123456789)
        self.assertIsNone(message.failure)
        self.assertEqual(message.result, "all fine")
    def test_negative_reply_message_body_parsing(self):
        """A body with key "e" is rebuilt into the remote exception,
        including its message and traceback text."""
        body = (b'{'
                b'    "e": {'
                b'        "s": "Error message",'
                b'        "t": ["TRACE HERE"],'
                b'        "c": "MessagingException",'
                b'        "m": "oslo_messaging.exceptions"'
                b'    }'
                b'}')
        message = pika_drv_msg.RpcReplyPikaIncomingMessage(
            self._pika_engine, self._channel, self._method, self._properties,
            body
        )
        self.assertEqual(message.msg_id, 123456789)
        self.assertIsNone(message.result)
        self.assertEqual(
            str(message.failure),
            'Error message\n'
            'TRACE HERE'
        )
        self.assertIsInstance(message.failure,
                              oslo_messaging.MessagingException)
class PikaOutgoingMessageTestCase(unittest.TestCase):
    """Tests for ``PikaOutgoingMessage.send`` over the confirmed and
    unconfirmed connection pools."""
    def setUp(self):
        self._pika_engine = mock.MagicMock()
        self._exchange = "it is exchange"
        self._routing_key = "it is routing key"
        self._expiration = 1
        self._expiration_time = time.time() + self._expiration
        self._mandatory = object()
        self._message = {"msg_type": 1, "msg_str": "hello"}
        self._context = {"request_id": 555, "token": "it is a token"}
    @patch("oslo_serialization.jsonutils.dumps",
           new=functools.partial(jsonutils.dumps, sort_keys=True))
    def test_send_with_confirmation(self):
        """confirm=True publishes via the with-confirmation pool; the body
        merges context (``_$_`` prefix) and payload, persistent delivery_mode=2."""
        message = pika_drv_msg.PikaOutgoingMessage(
            self._pika_engine, self._message, self._context
        )
        message.send(
            exchange=self._exchange,
            routing_key=self._routing_key,
            confirm=True,
            mandatory=self._mandatory,
            persistent=True,
            expiration_time=self._expiration_time,
            retrier=None
        )
        self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.assert_called_once_with(
            body=mock.ANY,
            exchange=self._exchange, mandatory=self._mandatory,
            properties=mock.ANY,
            routing_key=self._routing_key
        )
        body = self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["body"]
        # sort_keys patched above makes the JSON byte-stable for comparison.
        self.assertEqual(
            b'{"_$_request_id": 555, "_$_token": "it is a token", '
            b'"msg_str": "hello", "msg_type": 1}',
            body
        )
        props = self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["properties"]
        self.assertEqual(props.content_encoding, 'utf-8')
        self.assertEqual(props.content_type, 'application/json')
        self.assertEqual(props.delivery_mode, 2)
        # Expiration is in milliseconds; allow 100 ms of elapsed test time.
        self.assertTrue(self._expiration * 1000 - float(props.expiration) <
                        100)
        self.assertEqual(props.headers, {'version': '1.0'})
        self.assertTrue(props.message_id)
    @patch("oslo_serialization.jsonutils.dumps",
           new=functools.partial(jsonutils.dumps, sort_keys=True))
    def test_send_without_confirmation(self):
        """confirm=False publishes via the without-confirmation pool with
        non-persistent delivery_mode=1."""
        message = pika_drv_msg.PikaOutgoingMessage(
            self._pika_engine, self._message, self._context
        )
        message.send(
            exchange=self._exchange,
            routing_key=self._routing_key,
            confirm=False,
            mandatory=self._mandatory,
            persistent=False,
            expiration_time=self._expiration_time,
            retrier=None
        )
        self._pika_engine.connection_without_confirmation_pool.acquire(
        ).__enter__().channel.publish.assert_called_once_with(
            body=mock.ANY,
            exchange=self._exchange, mandatory=self._mandatory,
            properties=mock.ANY,
            routing_key=self._routing_key
        )
        body = self._pika_engine.connection_without_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["body"]
        self.assertEqual(
            b'{"_$_request_id": 555, "_$_token": "it is a token", '
            b'"msg_str": "hello", "msg_type": 1}',
            body
        )
        props = self._pika_engine.connection_without_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["properties"]
        self.assertEqual(props.content_encoding, 'utf-8')
        self.assertEqual(props.content_type, 'application/json')
        self.assertEqual(props.delivery_mode, 1)
        self.assertTrue(self._expiration * 1000 - float(props.expiration)
                        < 100)
        self.assertEqual(props.headers, {'version': '1.0'})
        self.assertTrue(props.message_id)
class RpcPikaOutgoingMessageTestCase(unittest.TestCase):
    """Tests for ``RpcPikaOutgoingMessage.send``: cast (no reply awaited)
    versus call (reply listener registered, correlation id set)."""
    def setUp(self):
        self._exchange = "it is exchange"
        self._routing_key = "it is routing key"
        self._pika_engine = mock.MagicMock()
        self._pika_engine.get_rpc_exchange_name.return_value = self._exchange
        self._pika_engine.get_rpc_queue_name.return_value = self._routing_key
        self._message = {"msg_type": 1, "msg_str": "hello"}
        self._context = {"request_id": 555, "token": "it is a token"}
    @patch("oslo_serialization.jsonutils.dumps",
           new=functools.partial(jsonutils.dumps, sort_keys=True))
    def test_send_cast_message(self):
        """Without a reply listener no correlation_id/reply_to are set."""
        message = pika_drv_msg.RpcPikaOutgoingMessage(
            self._pika_engine, self._message, self._context
        )
        expiration = 1
        expiration_time = time.time() + expiration
        message.send(
            exchange=self._exchange,
            routing_key=self._routing_key,
            reply_listener=None,
            expiration_time=expiration_time,
            retrier=None
        )
        self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.assert_called_once_with(
            body=mock.ANY,
            exchange=self._exchange, mandatory=True,
            properties=mock.ANY,
            routing_key=self._routing_key
        )
        body = self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["body"]
        self.assertEqual(
            b'{"_$_request_id": 555, "_$_token": "it is a token", '
            b'"msg_str": "hello", "msg_type": 1}',
            body
        )
        props = self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["properties"]
        self.assertEqual(props.content_encoding, 'utf-8')
        self.assertEqual(props.content_type, 'application/json')
        self.assertEqual(props.delivery_mode, 1)
        self.assertTrue(expiration * 1000 - float(props.expiration) < 100)
        self.assertEqual(props.headers, {'version': '1.0'})
        self.assertIsNone(props.correlation_id)
        self.assertIsNone(props.reply_to)
        self.assertTrue(props.message_id)
    @patch("oslo_serialization.jsonutils.dumps",
           new=functools.partial(jsonutils.dumps, sort_keys=True))
    def test_send_call_message(self):
        """With a reply listener the call blocks on the future and the
        published properties carry correlation_id and reply_to."""
        message = pika_drv_msg.RpcPikaOutgoingMessage(
            self._pika_engine, self._message, self._context
        )
        expiration = 1
        expiration_time = time.time() + expiration
        result = "it is a result"
        reply_queue_name = "reply_queue_name"
        future = futures.Future()
        future.set_result(result)
        reply_listener = mock.Mock()
        reply_listener.register_reply_waiter.return_value = future
        reply_listener.get_reply_qname.return_value = reply_queue_name
        res = message.send(
            exchange=self._exchange,
            routing_key=self._routing_key,
            reply_listener=reply_listener,
            expiration_time=expiration_time,
            retrier=None
        )
        self.assertEqual(result, res)
        self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.assert_called_once_with(
            body=mock.ANY,
            exchange=self._exchange, mandatory=True,
            properties=mock.ANY,
            routing_key=self._routing_key
        )
        body = self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["body"]
        self.assertEqual(
            b'{"_$_request_id": 555, "_$_token": "it is a token", '
            b'"msg_str": "hello", "msg_type": 1}',
            body
        )
        props = self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["properties"]
        self.assertEqual(props.content_encoding, 'utf-8')
        self.assertEqual(props.content_type, 'application/json')
        self.assertEqual(props.delivery_mode, 1)
        self.assertTrue(expiration * 1000 - float(props.expiration) < 100)
        self.assertEqual(props.headers, {'version': '1.0'})
        self.assertEqual(props.correlation_id, message.msg_id)
        self.assertEqual(props.reply_to, reply_queue_name)
        self.assertTrue(props.message_id)
class RpcReplyPikaOutgoingMessageTestCase(unittest.TestCase):
    """Tests for ``RpcReplyPikaOutgoingMessage.send``: success and failure
    replies published on the engine's rpc reply exchange."""
    def setUp(self):
        self._reply_q = "reply_queue_name"
        self._expiration = 1
        self._expiration_time = time.time() + self._expiration
        self._pika_engine = mock.MagicMock()
        self._rpc_reply_exchange = "rpc_reply_exchange"
        self._pika_engine.rpc_reply_exchange = self._rpc_reply_exchange
        self._msg_id = 12345567
    @patch("oslo_serialization.jsonutils.dumps",
           new=functools.partial(jsonutils.dumps, sort_keys=True))
    def test_success_message_send(self):
        """A success reply serializes the result under key "s"."""
        message = pika_drv_msg.RpcReplyPikaOutgoingMessage(
            self._pika_engine, self._msg_id, reply="all_fine"
        )
        message.send(self._reply_q, expiration_time=self._expiration_time,
                     retrier=None)
        self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.assert_called_once_with(
            body=b'{"s": "all_fine"}',
            exchange=self._rpc_reply_exchange, mandatory=True,
            properties=mock.ANY,
            routing_key=self._reply_q
        )
        props = self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["properties"]
        self.assertEqual(props.content_encoding, 'utf-8')
        self.assertEqual(props.content_type, 'application/json')
        self.assertEqual(props.delivery_mode, 1)
        self.assertTrue(self._expiration * 1000 - float(props.expiration) <
                        100)
        self.assertEqual(props.headers, {'version': '1.0'})
        self.assertEqual(props.correlation_id, message.msg_id)
        self.assertIsNone(props.reply_to)
        self.assertTrue(props.message_id)
    @patch("traceback.format_exception", new=lambda x, y, z: z)
    @patch("oslo_serialization.jsonutils.dumps",
           new=functools.partial(jsonutils.dumps, sort_keys=True))
    def test_failure_message_send(self):
        """A failure reply serializes class/module/message/trace under "e"."""
        failure_info = (oslo_messaging.MessagingException,
                        oslo_messaging.MessagingException("Error message"),
                        ['It is a trace'])
        message = pika_drv_msg.RpcReplyPikaOutgoingMessage(
            self._pika_engine, self._msg_id, failure_info=failure_info
        )
        message.send(self._reply_q, expiration_time=self._expiration_time,
                     retrier=None)
        self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.assert_called_once_with(
            body=mock.ANY,
            exchange=self._rpc_reply_exchange,
            mandatory=True,
            properties=mock.ANY,
            routing_key=self._reply_q
        )
        body = self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["body"]
        self.assertEqual(
            b'{"e": {"c": "MessagingException", '
            b'"m": "oslo_messaging.exceptions", "s": "Error message", '
            b'"t": ["It is a trace"]}}',
            body
        )
        props = self._pika_engine.connection_with_confirmation_pool.acquire(
        ).__enter__().channel.publish.call_args[1]["properties"]
        self.assertEqual(props.content_encoding, 'utf-8')
        self.assertEqual(props.content_type, 'application/json')
        self.assertEqual(props.delivery_mode, 1)
        self.assertTrue(self._expiration * 1000 - float(props.expiration) <
                        100)
        self.assertEqual(props.headers, {'version': '1.0'})
        self.assertEqual(props.correlation_id, message.msg_id)
        self.assertIsNone(props.reply_to)
        self.assertTrue(props.message_id)
| {
"content_hash": "44576eb1949d22f8be348c311c2c9af3",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 79,
"avg_line_length": 36.73848684210526,
"alnum_prop": 0.5927385056184805,
"repo_name": "dukhlov/oslo.messaging",
"id": "0cc1b869d675b5f6f77a068ada4f09a857f060ed",
"size": "22946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_messaging/tests/drivers/pika/test_message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "912386"
},
{
"name": "Shell",
"bytes": "8086"
}
],
"symlink_target": ""
} |
"""
PyCOMPSs Testbench
========================
"""
# Imports
from pycompss.api.task import task
from pycompss.api.mpi import mpi
from pycompss.api.constraint import constraint
from pycompss.api.api import compss_wait_on
@constraint(computing_units="2")
@mpi(runner="mpirun", computing_nodes="1")
@task(returns=list)
def return_rank(seed):
    """PyCOMPSs MPI task: each MPI process returns its rank shifted by *seed*.

    Declared with ``returns=list``, so the invocation yields one value per
    MPI process (two computing units on one node here).
    """
    from mpi4py import MPI  # imported inside the task: only workers need mpi4py
    size = MPI.COMM_WORLD.size
    rank = MPI.COMM_WORLD.rank
    print("Launched MPI communicator with {0} MPI processes".format(size))
    return rank+seed
def main():
    """Run the MPI task, wait for its results and validate the ranks.

    Raises:
        AssertionError: if the gathered ranks differ from the expected list.
    """
    # Fix: removed an unused function-local import of compss_barrier.
    rank_list = return_rank(10)
    rank_list = compss_wait_on(rank_list)
    # Two computing units on one node -> ranks 0 and 1, shifted by seed 10.
    expected_ranks = [10, 11]
    assert rank_list == expected_ranks, "Incorrect returns. Actual Result: {0} \nExpected Result: {1} ".format(rank_list, expected_ranks)
    print("Finished")
if __name__ == '__main__':
    main()
| {
"content_hash": "dad4f2576538f0994a7e93aa94aaf7d4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 137,
"avg_line_length": 22.9,
"alnum_prop": 0.6626637554585153,
"repo_name": "mF2C/COMPSs",
"id": "20b8bf22ffdbd4f4114f356134f9dcc79058475b",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sources/python/9_python_mpi/src/python_mpi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "1595"
},
{
"name": "C",
"bytes": "222477"
},
{
"name": "C++",
"bytes": "200186"
},
{
"name": "Dockerfile",
"bytes": "901"
},
{
"name": "Gnuplot",
"bytes": "4195"
},
{
"name": "Java",
"bytes": "4213323"
},
{
"name": "JavaScript",
"bytes": "16906"
},
{
"name": "Jupyter Notebook",
"bytes": "10514"
},
{
"name": "Lex",
"bytes": "1356"
},
{
"name": "M4",
"bytes": "5538"
},
{
"name": "Makefile",
"bytes": "14740"
},
{
"name": "Python",
"bytes": "635267"
},
{
"name": "Shell",
"bytes": "1241476"
},
{
"name": "XSLT",
"bytes": "177323"
},
{
"name": "Yacc",
"bytes": "3655"
}
],
"symlink_target": ""
} |
""" swagger format declartion """
import venusian
from .interfaces import CATEGORY
__all__ = ('format', 'SwaggerFormat')
class SwaggerFormat(object):
    """Base class for custom swagger string formats.

    Subclasses must implement :meth:`to_wire` and :meth:`to_python`;
    :meth:`validate` may optionally be overridden (the default accepts
    any value).
    """

    def __init__(self, name):
        # Swagger ``format`` identifier handled by this instance.
        self.format = name

    def to_wire(self, item):
        """Serialize *item* into its wire representation."""
        raise NotImplementedError

    def to_python(self, value):
        """Deserialize wire *value* into a python object."""
        raise NotImplementedError

    def validate(self, value):
        """Validate *value*; the base implementation accepts anything."""
        pass
class format(object):
    """Class decorator that registers a :class:`SwaggerFormat` subclass.

    ``@format('date')`` attaches a venusian callback to the decorated
    class; during a venusian scan the class is instantiated with the
    declared format name and registered on the config loader.
    """
    def __init__(self, name):
        # Format name passed to the wrapped class when it is instantiated.
        self.name = name
    def __call__(self, wrapped):
        assert issubclass(wrapped, SwaggerFormat)
        def register(scanner, name, wrapped):
            # Deferred until scan time: build the instance and register it.
            scanner.config.loader.register_format(wrapped(self.name))
        venusian.attach(wrapped, register, category=CATEGORY)
        return wrapped
| {
"content_hash": "9f732e85b3e1e5bd00b17d1ae7b1c204",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 69,
"avg_line_length": 21.542857142857144,
"alnum_prop": 0.6392572944297082,
"repo_name": "fafhrd91/mdl",
"id": "ba5524898550636ab03a148f4d2ff965e28e0950",
"size": "754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mdl/formatter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1318"
},
{
"name": "Python",
"bytes": "145033"
}
],
"symlink_target": ""
} |
from entity.Entity import Entity
from statisticalDistributions import *
class InventoryEntity(Entity):
    """Abstract base for simulation entities that hold inventory and can
    give/take orders of stock.

    NOTE(review): ``__metaclass__`` is Python 2 syntax; Python 3 ignores it,
    so abstract-method enforcement would silently vanish there — confirm
    the target interpreter.
    """
    __metaclass__ = ABCMeta  # ABCMeta presumably comes from the star import above — verify
    def __init__(self, simSystem, Type, id, inputPointer, outputPointer):
        super(InventoryEntity, self).__init__(simSystem, Type, id, inputPointer, outputPointer)
    @abstractmethod
    def takeOrder(self, amount):
        # Abstract hook: subclasses define how an order of ``amount`` is taken.
        pass
    @abstractmethod
    def giveOrder(self, amount):
        # Abstract hook: subclasses define how an order of ``amount`` is given.
        pass
| {
"content_hash": "e42df385988ab7bdac793e72d246d4c3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 95,
"avg_line_length": 26,
"alnum_prop": 0.6855203619909502,
"repo_name": "mrhsce/simPython",
"id": "00cc284f4bda0fdad22c454bd2622e73e191a975",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entity/inventoryEntity/InventoryEntity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23870"
}
],
"symlink_target": ""
} |
import os, random
import queue
# In[2]:
def hierachy_generator(k, size):
    """Build a complete k-ary employee hierarchy of roughly ``size`` employees.

    Employees are named ``e0``, ``e1``, ... in breadth-first order; ``e0`` is
    the root. Children are added k at a time, so generation stops at the
    first count >= ``size`` that the k-batching reaches.

    Returns:
        tuple: (children-by-employee dict, ancestors-by-employee dict,
        employee list in creation order, final employee count).
    """
    children_of = {}
    ancestors_of = {}
    roster = []
    next_id = 0
    root = 'e' + str(next_id)
    next_id += 1
    roster.append(root)
    children_of[root] = []
    ancestors_of[root] = []
    pending = queue.Queue()
    pending.put(root)
    while next_id < size:
        parent = pending.get()
        # Give the dequeued node its full batch of k direct reports.
        while len(children_of[parent]) < k:
            child = 'e' + str(next_id)
            next_id += 1
            roster.append(child)
            pending.put(child)
            children_of[parent].append(child)
            children_of[child] = []
            # The child's management chain is its parent plus the parent's chain.
            ancestors_of[child] = [parent]
            for boss in ancestors_of[parent]:
                if boss not in ancestors_of[child] and boss != child:
                    ancestors_of[child].append(boss)
    return children_of, ancestors_of, roster, next_id
# In[3]:
def generate_sub_ordinates(hierachy_dict, sub_ordinates_dict, employees, node, return_nodes, k , index):
    """Attach direct reports to *node* until it has k children.

    Each new child ``e<index>`` is appended to *employees*, queued on
    *return_nodes* for later expansion, and inherits *node* plus all of
    node's ancestors as its management chain.

    Returns the (mutated) dicts, employee list, queue and the next free index.
    """
    while len(hierachy_dict[node]) < k:
        child = 'e' + str(index)
        employees.append(child)
        index += 1
        return_nodes.put(child)
        hierachy_dict[node].append(child)
        hierachy_dict[child] = []
        # Ancestor chain: the direct manager first, then its managers,
        # skipping duplicates and the child itself.
        chain = [node]
        for boss in sub_ordinates_dict[node]:
            if boss not in chain and boss != child:
                chain.append(boss)
        sub_ordinates_dict[child] = chain
    return hierachy_dict, sub_ordinates_dict, employees, return_nodes, index
# In[4]:
def generate_label(hierachy_dict, sub_ordinates_dict, employees, manager_file, label_file, employees_file, delta):
    """Write the manager, label and employee files ("glass ceiling" variant).

    Junior employees (those without direct reports) receive label ``A`` with
    probability *delta* and ``B`` otherwise; everyone else is always ``B``.
    The number of ``A`` labels is printed to stdout.
    """
    # Manager file: one "<manager>\t<employee>" line per ancestor relation.
    with open(manager_file, 'w') as mf:
        for employee, managers in sub_ordinates_dict.items():
            for manager in managers:
                print('%s\t%s' % (manager, employee), file=mf)
    # Juniors are exactly the employees with an empty children list.
    junior_employees = [emp for emp, reports in hierachy_dict.items() if len(reports) == 0]
    size_A = 0
    with open(label_file, 'w') as lf:
        for employee in junior_employees:
            if random.random() < delta:
                print('%s\tA' % (employee), file=lf)
                size_A += 1
            else:
                print('%s\tB' % (employee), file=lf)
        for employee in employees:
            if employee in junior_employees:
                continue
            print('%s\tB' % (employee), file=lf)
    print(size_A)
    with open(employees_file, 'w') as ef:
        for employee in employees:
            print('%s' % (employee), file=ef)
# In[66]:
def generate_label_2(hierachy_dict, sub_ordinates_dict, employees, manager_file, label_file, employees_file, delta):
    """Write the manager, label and employee files ("uniform" variant).

    Every employee — junior or not — receives label ``A`` or ``B`` with
    equal probability 0.5; the *delta* parameter is accepted for signature
    parity with :func:`generate_label` but is not used. The number of ``A``
    labels is printed to stdout.
    """
    # Juniors are computed for parity with generate_label but never used here.
    junior_employees = [emp for emp, reports in hierachy_dict.items() if len(reports) == 0]
    with open(manager_file, 'w') as mf:
        for employee, managers in sub_ordinates_dict.items():
            for manager in managers:
                print('%s\t%s' % (manager, employee), file=mf)
    size_A = 0
    with open(label_file, 'w') as lf:
        for employee in employees:
            if random.random() < 0.5:
                print('%s\tA' % (employee), file=lf)
                size_A += 1
            else:
                print('%s\tB' % (employee), file=lf)
    print(size_A)
    with open(employees_file, 'w') as ef:
        for employee in employees:
            print('%s' % (employee), file=ef)
# In[102]:
def run_generator(k, size, manager_file, label_file, employees_file, delta):
    """Build a k-ary hierarchy of ~``size`` employees and write the
    manager/label/employee files.

    Currently the "uniform" labelling path (generate_label_2) is enabled,
    which ignores *delta*; the "glass ceiling" path is commented out.
    """
    hierachy_dict, sub_ordinates_dict, employees, index = hierachy_generator(k, size)
    # Sanity output: all three structures should cover every employee.
    print(len(employees))
    print(len(hierachy_dict.keys()))
    print(len(sub_ordinates_dict.keys()))
    #glass cieling
    #generate_label(hierachy_dict, sub_ordinates_dict, employees, manager_file, label_file, employees_file, delta)
    #uniform
    generate_label_2(hierachy_dict, sub_ordinates_dict, employees, manager_file, label_file, employees_file, delta)
# In[111]:
# Script entry: build a 5-ary hierarchy of ~100 employees and write the
# test-dataset files under ../data/test/.
k = 5
size = 100
manager_file = '../data/test/manager.txt'
label_file = '../data/test/label.txt'
employees_file = '../data/test/employee.txt'
delta = 0.7  # NOTE(review): unused by the enabled "uniform" labelling path
run_generator(k, size, manager_file, label_file, employees_file, delta)
# In[ ]:
| {
"content_hash": "1304b5f5a60b9776260b6033e3c529f8",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 175,
"avg_line_length": 27.907801418439718,
"alnum_prop": 0.5786531130876748,
"repo_name": "gfarnadi/FairPSL",
"id": "fb69f31975721b03737c716646762059ad7bef51",
"size": "3985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problems/performance_review/data/hierachy_generation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "169716"
},
{
"name": "Python",
"bytes": "92081"
}
],
"symlink_target": ""
} |
from django.db import models
from django.conf import settings
from estudios_socioeconomicos.models import Estudio
class Retroalimentacion(models.Model):
    """ The model that represents the feedback on a study.
    This model contains the feedback left either by the admin or
    by a capturista on a study during the review process.
    Attributes:
    -----------
    estudio : Estudio
        The study which is being checked by the user. The feedback
        is about this study.
    usuario : User
        The user who writes the feedback. It may be either the admin or
        the capturista.
    fecha : DateTimeField
        The time at which the feedback is filled.
    descripcion : TextField
        The actual feedback text.
    activo : BooleanField
        Boolean indicating whether this feedback should be shown or not.
    """
    estudio = models.ForeignKey(Estudio, related_name='retroalimentacion_estudio')
    usuario = models.ForeignKey(settings.AUTH_USER_MODEL)
    # auto_now_add: set automatically once, when the feedback row is created.
    fecha = models.DateTimeField(null=True, blank=True, auto_now_add=True)
    descripcion = models.TextField()
    # Soft-visibility flag: feedback defaults to active/visible.
    activo = models.BooleanField(default=True)
    def __str__(self):
        # Human-readable summary; the description is truncated to 40 chars.
        return '{usuario} - {fecha}: {descripcion}'.format(
            fecha=str(self.fecha),
            usuario=str(self.usuario),
            descripcion=self.descripcion[:40])
| {
"content_hash": "567f52d0060269a964478b11e064302a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 82,
"avg_line_length": 37.53846153846154,
"alnum_prop": 0.6325136612021858,
"repo_name": "erikiado/jp2_online",
"id": "664ac93bb7aa48c7afb638c56110b205be31d1b6",
"size": "1464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "captura/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14504"
},
{
"name": "HTML",
"bytes": "146491"
},
{
"name": "JavaScript",
"bytes": "15162"
},
{
"name": "Python",
"bytes": "586358"
}
],
"symlink_target": ""
} |
import re
from abc import abstractmethod
from datetime import date, datetime, time, timedelta
from enum import Enum
from json.encoder import JSONEncoder as BaseJSONEncoder
from .base import AccessMode
from .fields import MultiTypeField
from .model_types import ListModel
from .models import BaseModel
__all__ = ['underscore_to_camel',
'BaseModelIterator',
'ModelIterator',
'ListFormatterIter',
'BaseModelFormatterIter',
'ModelFormatterIter',
'JSONEncoder',
'Factory',
'factory']
def underscore_to_camel(string):
    """Convert an underscored name to camelCase.

    Only ``_`` followed by a lowercase ascii letter is collapsed; any other
    underscore (trailing, or before a digit/uppercase) is left untouched.
    """
    def _capitalize(match):
        return match.group(1).upper()
    return re.sub('_([a-z])', _capitalize, string)
class BaseModelIterator:
    """Iterates a model's fields as ``(real_name, field_obj, value)`` triples."""

    def __init__(self, model):
        # Model whose fields will be traversed.
        self.model = model

    def __iter__(self):
        model = self.model
        for fieldname in model.get_fields():
            field = model.get_field_obj(fieldname)
            name = model.get_real_name(fieldname)
            value = model.get_field_value(fieldname)
            yield name, field, value
class ModelIterator(BaseModelIterator):
    """Dict-like iteration helper over model fields.

    Iterating yields ``(name, value)`` pairs; :meth:`keys` and
    :meth:`values` mirror the mapping protocol, and ``items`` is an
    alias for plain iteration.
    """

    def __iter__(self):
        for name, _field, value in super(ModelIterator, self).__iter__():
            yield name, value

    items = __iter__

    def values(self):
        """Yield each field's value."""
        for _name, value in self:
            yield value

    def keys(self):
        """Yield each field's real name."""
        for name, _value in self:
            yield name
class BaseFormatterIter:
    """Abstract base for formatter iterators: subclasses turn their wrapped
    data into a plain-python structure via :meth:`format`."""
    @abstractmethod
    def format(self):  # pragma: no cover
        pass
class BaseFieldtypeFormatterIter(BaseFormatterIter):
    """Formatter bound to a concrete value/field pair plus the parent model
    formatter that knows how to format nested items."""
    def __init__(self, obj, field, parent_formatter):
        self.obj = obj
        self.field = field
        self.parent_formatter = parent_formatter
class ListFormatterIter(BaseFieldtypeFormatterIter):
    """Lazily formats each item of a list model via the parent formatter."""
    def __iter__(self):
        for item in self.obj:
            yield self.parent_formatter.format_field(self.field, item)
    def format(self):
        # Materialize the lazily formatted items into a plain list.
        return list(self)
class BaseModelFormatterIter(BaseModelIterator, BaseFormatterIter):
    """
    Base formatter iterator for Dirty Models.

    Yields ``(real_name, formatted_value)`` pairs, skipping fields whose
    effective access mode is HIDDEN, and wraps nested models/lists in
    formatter iterators of their own.
    """
    def __iter__(self):
        for name, field, value in super(BaseModelFormatterIter, self).__iter__():
            if self.must_to_skip(name, field, value):
                continue
            yield name, self.format_field(field,
                                          self.model.get_field_value(name))
    def must_to_skip(self, name, field, value):
        """Return True when the field must be omitted from the output."""
        am = field.access_mode
        try:
            # A per-model override, when present, wins over the field's mode.
            am = self.model.__override_field_access_modes__[name]
        except KeyError:
            pass
        if am == AccessMode.HIDDEN:
            return True
        return False
    def format_field(self, field, value):
        """Dispatch *value* to the right representation for its type."""
        if isinstance(field, MultiTypeField):
            # Recurse with the concrete inner field matching this value.
            return self.format_field(field.get_field_type_by_value(value), value)
        elif isinstance(value, BaseModel):
            # Nested model: wrap in the same formatter class (polymorphic).
            return self.__class__(value)
        elif isinstance(value, ListModel):
            return ListFormatterIter(obj=value, field=value.get_field_type(), parent_formatter=self)
        elif isinstance(value, Enum):
            # Enums are formatted through their underlying value.
            return self.format_field(field, value.value)
        return value
    def format(self):
        """Recursively collapse the iterator into a plain dict."""
        return {k: v.format() if isinstance(v, BaseFormatterIter) else v for k, v in self}
class ModelFormatterIter(BaseModelFormatterIter):
    """
    Iterate over model fields formatting them.

    Adds date/time and timedelta handling on top of the base formatter.
    """
    def format_field(self, field, value):
        if isinstance(value, (date, datetime, time)) and not isinstance(field, MultiTypeField):
            try:
                # The field may define its own serialization of temporal values.
                # NOTE: AttributeError raised *inside* get_formatted_value is
                # also caught here and falls back to str() -- intentional or
                # not, callers rely on this never raising.
                return field.get_formatted_value(value)
            except AttributeError:
                return str(value)
        elif isinstance(value, timedelta):
            # Timedeltas serialize as a plain number of seconds.
            return value.total_seconds()
        return super(ModelFormatterIter, self).format_field(field, value)
class JSONEncoder(BaseJSONEncoder):
    """
    JSON encoder able to serialize Dirty Models directly.
    """

    # Formatter iterator class used to walk a model before serialization.
    default_model_iter = ModelFormatterIter

    def default(self, obj):
        if isinstance(obj, BaseModel):
            # Wrap the model in a formatter iterator, then serialize that.
            return self.default(self.default_model_iter(obj))
        if isinstance(obj, BaseFormatterIter):
            return obj.format()
        return super(JSONEncoder, self).default(obj)
class Factory:
    """
    Decorator turning a zero-argument callable into a default-value factory.

    Useful to make e.g. :meth:`datetime.datetime.now` the default of a
    :class:`~dirty_models.fields.DateTimeField`, so each access produces a
    fresh value.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self):
        return self.func()


# Lowercase alias for decorator-style usage.
factory = Factory
| {
"content_hash": "41cb0b322695c1c6e8330cd7bfc8c578",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 104,
"avg_line_length": 27.15909090909091,
"alnum_prop": 0.6194560669456067,
"repo_name": "alfred82santa/dirty-models",
"id": "eb215b582439aab7aa761c9d23f44ceb68d89518",
"size": "4780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dirty_models/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1833"
},
{
"name": "Python",
"bytes": "277583"
}
],
"symlink_target": ""
} |
"""
Module showing amount of windows at the scratchpad.
@author shadowprince
@license Eclipse Public License
"""
import i3
from time import time
def find_scratch(tree):
    """Depth-first search for the ``__i3_scratch`` workspace node.

    Returns the first (truthy) matching subtree, or None when absent.
    """
    if tree["name"] == "__i3_scratch":
        return tree
    for child in tree["nodes"]:
        found = find_scratch(child)
        if found:
            return found
    return None
class Py3status:
    """Count the windows currently parked on the i3 scratchpad."""

    # available configuration parameters
    cache_timeout = 5
    format = "{} ⌫" # format of indicator. {} replaces with count of windows
    hide_when_none = False # hide indicator when there is no windows

    def __init__(self):
        # -1 forces 'transformed' on the very first poll.
        self.count = -1

    def scratchpad_counter(self, i3s_output_list, i3s_config):
        floating = find_scratch(i3.get_tree()).get("floating_nodes", [])
        current = len(floating)

        changed = current != self.count
        if changed:
            self.count = current

        response = {
            'cached_until': time() + self.cache_timeout,
            'transformed': changed
        }
        if self.hide_when_none and current == 0:
            response['full_text'] = ''
        else:
            response['full_text'] = self.format.format(current)
        return response
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
from time import sleep
x = Py3status()
while True:
print(x.scratchpad_counter([], {}))
sleep(1)
| {
"content_hash": "b1c5d0dad4a5ad54c79718f073367444",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 24.131147540983605,
"alnum_prop": 0.5645380434782609,
"repo_name": "FedericoCeratto/debian-py3status",
"id": "0ee9a4939d38de3581706855e7f909eea803ea41",
"size": "1498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3status/modules/scratchpad_counter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "126913"
}
],
"symlink_target": ""
} |
import common.ot_utils
def find_reports_dist(r1, r2):
    """Distance in meters between the GPS fixes attached to two reports."""
    loc_a = r1.my_loc
    loc_b = r2.my_loc
    return common.ot_utils.latlon_to_meters(loc_a.lat, loc_a.lon,
                                            loc_b.lat, loc_b.lon)
def find_distance_in_reports(reports, full=False):
    """Distances between consecutive reports that both carry a GPS fix.

    With full=True each entry is a (report, next_report, distance) triple;
    otherwise just the distance in meters. Pairs missing a fix are skipped.
    """
    reports = list(reports)
    result = []
    for first, second in zip(reports, reports[1:]):
        if not (hasattr(first, 'my_loc') and hasattr(second, 'my_loc')):
            continue
        dist = find_reports_dist(first, second)
        result.append((first, second, dist) if full else dist)
    return result
def find_device_ids(device_pat):
    """Distinct device ids whose id contains the given substring pattern."""
    import models
    matching = models.Report.objects.filter(device_id__contains=device_pat)
    return matching.values_list('device_id', flat=True).distinct()
def get_reports(device_id):
    """All reports of one device, oldest first, as a concrete list."""
    import models
    query = models.Report.objects.filter(device_id=device_id).order_by('timestamp')
    return list(query)
def analyze_bssid(bssid):
from models import SingleWifiReport,Report,LocationInfo
wifi_reports = SingleWifiReport.objects.filter(key=bssid).order_by('report__timestamp')
print 'Number of wifi reports = %d' % wifi_reports.count()
names = SingleWifiReport.objects.filter(key=bssid).values_list('SSID',flat=True).distinct()
print 'Names = %s' % (','.join(names))
reports = Report.objects.filter(id__in=wifi_reports.values_list('report')).order_by('timestamp')
print 'Number of reports = %s' % reports.count()
locs = list(LocationInfo.objects.filter(id__in=reports.values_list('my_loc')).order_by('timestamp'))
print 'Number of locations = %s' % (len(locs))
min_lat = min(loc.lat for loc in locs)
max_lat = max(loc.lat for loc in locs)
min_lon = min(loc.lon for loc in locs)
max_lon = max(loc.lon for loc in locs)
max_dist = common.ot_utils.latlon_to_meters(min_lat,min_lon,max_lat,max_lon)
print 'Maximal distance = %.2f' % (max_dist)
for idx,loc in enumerate(locs[0:-1]):
loc_next = locs[idx+1]
dist_next = locs_dist(loc,loc_next)
time_diff = int((loc_next.timestamp - loc.timestamp).total_seconds())
if dist_next > 500:
data = dict()
is_same_device = loc.report.device_id == loc_next.report.device_id
if is_same_device:
device_status = 'SAME DEVICE %s' % (loc.report.device_id)
else:
device_status = '%s %s' % (loc.report.device_id,loc_next.report.device_id)
data['title_cur'] = 'CUR'
data['title_next'] = 'NEXT'
data['title_delta'] = 'DELTA'
data['id_cur'] = loc.report.id
data['id_next'] = loc_next.report.id
data['gpsts_cur'] = loc.timestamp.replace(microsecond=0).isoformat().replace('+00:00','')
data['gpsts_next'] = loc_next.timestamp.replace(microsecond=0).isoformat().replace('+00:00','')
data['repts_cur'] = loc.report.timestamp.replace(microsecond=0).isoformat().replace('+00:00','')
data['repts_next'] = loc_next.report.timestamp.replace(microsecond=0).isoformat().replace('+00:00','')
data['dev_cur'] = loc.report.device_id
data['dev_next'] = loc_next.report.device_id
data['total_dist'] = dist_next
data['idx'] = idx
data['repts_delta'] = int((loc_next.report.timestamp - loc.report.timestamp).total_seconds())
data['gpsts_delta'] = int((loc_next.timestamp - loc.timestamp).total_seconds())
data['dev_delta'] = 'SAME' if loc.report.device_id == loc_next.report.device_id else ''
print '=' * 60
print '''%(idx)3s: Distance: %(total_dist)8.2f
%(title_cur)20s %(title_next)20s %(title_delta)10s
REPORT ID: %(id_cur)20s %(id_next)20s
GPS TIMESTAMP: %(gpsts_cur)20s %(gpsts_next)20s %(gpsts_delta)10s
REPROT TIMESTAMP: %(repts_cur)20s %(repts_next)20s %(repts_delta)10s
DEVICE: %(dev_cur)20s %(dev_next)20s %(dev_delta)10s
''' % data
def locs_dist(loc1, loc2):
    """Meters between two LocationInfo points."""
    return common.ot_utils.latlon_to_meters(
        loc1.lat, loc1.lon, loc2.lat, loc2.lon)
def delete_device_id(device_id):
    """Purge every trace of a device: parsed reports and its raw uploads."""
    import analysis.models
    analysis.models.Report.objects.filter(device_id=device_id).delete()
    import reports
    # NOTE(review): assumes reports.models is already imported somewhere --
    # a bare `import reports` does not load the submodule; confirm.
    reports.models.RawReport.objects.filter(text__contains=device_id).delete()
| {
"content_hash": "b560b3f50deceb462a0d6564403370ee",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 123,
"avg_line_length": 47.4,
"alnum_prop": 0.6215856095936043,
"repo_name": "hasadna/OpenTrain",
"id": "7ee8ecccb4e38eb0209b63bf6692b2194621ff24",
"size": "4503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webserver/opentrain/analysis/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "857"
},
{
"name": "CSS",
"bytes": "22895"
},
{
"name": "JavaScript",
"bytes": "276346"
},
{
"name": "Python",
"bytes": "398492"
},
{
"name": "Shell",
"bytes": "671"
}
],
"symlink_target": ""
} |
from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest, use_profile
class TestSimpleCopy(DBTIntegrationTest):
    """Seed + run the simple-copy project on each adapter and verify the
    copied tables match the seed, under various quoting configurations.

    Refactor: the repeated seed/run/assert-counts sequence is factored into
    :meth:`_seed_and_run`; each test keeps exactly its original assertions.
    """

    @property
    def schema(self):
        return "simple_copy_001"

    @staticmethod
    def dir(path):
        return "test/integration/001_simple_copy_test/" + path.lstrip("/")

    @property
    def models(self):
        return self.dir("models")

    def _seed_and_run(self, data_path, quoting=None):
        """Point the project at *data_path*, `dbt seed` then `dbt run`,
        asserting one seed file and six models were processed."""
        project = {"data-paths": [self.dir(data_path)]}
        if quoting is not None:
            project["quoting"] = quoting
        self.use_default_project(project)
        results = self.run_dbt(["seed"])
        self.assertEqual(len(results), 1)
        results = self.run_dbt()
        self.assertEqual(len(results), 6)

    @use_profile("postgres")
    def test__postgres__simple_copy(self):
        self._seed_and_run("seed-initial")
        self.assertManyTablesEqual(["seed", "view_model", "incremental", "materialized"])
        self._seed_and_run("seed-update")
        self.assertManyTablesEqual(["seed", "view_model", "incremental", "materialized"])

    @use_profile("postgres")
    def test__postgres__dbt_doesnt_run_empty_models(self):
        self._seed_and_run("seed-initial")
        # Models with empty/disabled bodies must not materialize anything.
        models = self.get_models_in_schema()
        self.assertFalse("empty" in models.keys())
        self.assertFalse("disabled" in models.keys())

    @use_profile("snowflake")
    def test__snowflake__simple_copy(self):
        # This test historically did not assert run counts; kept lenient.
        self.use_default_project({"data-paths": [self.dir("seed-initial")]})
        self.run_dbt(["seed"])
        self.run_dbt()
        self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED"])
        self.use_default_project({"data-paths": [self.dir("seed-update")]})
        self.run_dbt(["seed"])
        self.run_dbt()
        self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED"])

    @use_profile("snowflake")
    def test__snowflake__simple_copy__quoting_on(self):
        quoting = {"identifier": True}
        self._seed_and_run("seed-initial", quoting=quoting)
        self.assertManyTablesEqual(["seed", "view_model", "incremental", "materialized"])
        self._seed_and_run("seed-update", quoting=quoting)
        self.assertManyTablesEqual(["seed", "view_model", "incremental", "materialized"])

    @use_profile("snowflake")
    def test__snowflake__simple_copy__quoting_off(self):
        quoting = {"identifier": False}
        self._seed_and_run("seed-initial", quoting=quoting)
        self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED"])
        self._seed_and_run("seed-update", quoting=quoting)
        self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED"])

    @use_profile("snowflake")
    def test__snowflake__seed__quoting_switch(self):
        # Re-seeding after flipping identifier quoting must fail.
        self.use_default_project({
            "data-paths": [self.dir("seed-initial")],
            "quoting": {"identifier": False},
        })
        results = self.run_dbt(["seed"])
        self.assertEqual(len(results), 1)
        self.use_default_project({
            "data-paths": [self.dir("seed-update")],
            "quoting": {"identifier": True},
        })
        results = self.run_dbt(["seed"], expect_pass=False)

    @use_profile("bigquery")
    def test__bigquery__simple_copy(self):
        self._seed_and_run("seed-initial")
        self.assertTablesEqual("seed", "view_model")
        self.assertTablesEqual("seed", "incremental")
        self.assertTablesEqual("seed", "materialized")
        self._seed_and_run("seed-update")
        self.assertTablesEqual("seed", "view_model")
        self.assertTablesEqual("seed", "incremental")
        self.assertTablesEqual("seed", "materialized")
class TestSimpleCopyLowercasedSchema(DBTIntegrationTest):
    """Same copy scenario, but run against a lowercased schema name."""

    @property
    def schema(self):
        return "simple_copy_001"

    @staticmethod
    def dir(path):
        return "test/integration/001_simple_copy_test/" + path.lstrip("/")

    @property
    def models(self):
        return self.dir("models")

    def unique_schema(self):
        # bypass the forced uppercasing that unique_schema() does on snowflake
        schema = super(TestSimpleCopyLowercasedSchema, self).unique_schema()
        return schema.lower()

    @use_profile('snowflake')
    def test__snowflake__simple_copy(self):
        for data_dir in ("seed-initial", "seed-update"):
            self.use_default_project({"data-paths": [self.dir(data_dir)]})
            self.run_dbt(["seed"])
            self.run_dbt()
            self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED"])

    @use_profile("snowflake")
    def test__snowflake__seed__quoting_switch_schema(self):
        initial_project = {
            "data-paths": [self.dir("seed-initial")],
            "quoting": {"identifier": False, "schema": True},
        }
        self.use_default_project(initial_project)
        results = self.run_dbt(["seed"])
        self.assertEqual(len(results), 1)

        # Flipping schema quoting must make the second seed fail.
        updated_project = {
            "data-paths": [self.dir("seed-update")],
            "quoting": {"identifier": False, "schema": False},
        }
        self.use_default_project(updated_project)
        results = self.run_dbt(["seed"], expect_pass=False)
| {
"content_hash": "43d519cbbe695e284609e068efaecfb3",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 89,
"avg_line_length": 33.61951219512195,
"alnum_prop": 0.5959082994776552,
"repo_name": "nave91/dbt",
"id": "f99ab209efc8e6f9127e0c6f578b63f373139073",
"size": "6892",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "test/integration/001_simple_copy_test/test_simple_copy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "814"
},
{
"name": "Python",
"bytes": "447357"
},
{
"name": "Shell",
"bytes": "617"
}
],
"symlink_target": ""
} |
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('chart_format15.xlsx')

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with chart formatting."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'line'})
        chart.axis_ids = [42401792, 42403712]

        columns = [
            ('A1', [1, 2, 3, 4, 5]),
            ('B1', [2, 4, 6, 8, 10]),
            ('C1', [3, 6, 9, 12, 15]),
        ]
        for cell, values in columns:
            worksheet.write_column(cell, values)

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'trendline': {'type': 'linear'},
        })
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })

        chart.set_legend({'delete_series': [2, 0]})
        worksheet.insert_chart('E9', chart)

        workbook.close()
        self.assertExcelEqual()
| {
"content_hash": "acacd5592c15feeba7c5381dfadbf9a9",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 24.90566037735849,
"alnum_prop": 0.5416666666666666,
"repo_name": "jmcnamara/XlsxWriter",
"id": "961fd570c65fc0f626adffa75462a6f3c37d0350",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_chart_format15.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.core.urlresolvers import resolve, Resolver404
from django.http import (HttpResponse, HttpResponseNotFound, HttpRequest,
build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.template.engine import Engine
from django.utils.datastructures import MultiValueDict
from django.utils.html import escape
from django.utils.encoding import force_bytes, smart_text
from django.utils import lru_cache
from django.utils.module_loading import import_string
from django.utils import six
from django.utils.translation import ugettext as _
# Setting-name fragments whose values must never be shown on debug pages.
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE')
# Replacement shown in place of any cleansed (sensitive) value.
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
    """Yield the start offset of each line of *template_source*, then a
    final sentinel offset one past the end of the string."""
    yield 0
    for offset, char in enumerate(template_source):
        if char == '\n':
            yield offset + 1
    yield len(template_source) + 1
class CallableSettingWrapper(object):
    """Wrap a callable found in settings so the debug page neither calls it
    (#21345) nor breaks when the callable forbids attribute access (#23070).
    """

    def __init__(self, callable_setting):
        self._wrapped = callable_setting

    def __repr__(self):
        return repr(self._wrapped)
def cleanse_setting(key, value):
    """Cleanse an individual setting key/value of sensitive content.

    Dict values are cleansed recursively; callables are wrapped so the debug
    page will not invoke them.
    """
    try:
        if HIDDEN_SETTINGS.search(key):
            cleansed = CLEANSED_SUBSTITUTE
        elif isinstance(value, dict):
            cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
        else:
            cleansed = value
    except TypeError:
        # If the key isn't regex-able, just return as-is.
        cleansed = value

    if callable(cleansed):
        # For fixing #21345 and #23070
        cleansed = CallableSettingWrapper(cleansed)
    return cleansed
def get_safe_settings():
    "Returns a dictionary of the settings module, with sensitive settings blurred out."
    return {
        key: cleanse_setting(key, getattr(settings, key))
        for key in dir(settings)
        if key.isupper()
    }
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    if request.is_ajax():
        # AJAX clients get a plain-text traceback instead of the HTML page.
        body = reporter.get_traceback_text()
        content_type = 'text/plain'
    else:
        body = reporter.get_traceback_html()
        content_type = 'text/html'
    return HttpResponse(body, status=status_code, content_type=content_type)
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
    # Instantiate the configured filter once; lru_cache memoizes the instance.
    filter_cls = import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)
    return filter_cls()
def get_exception_reporter_filter(request):
    """Return the filter attached to the request, or the configured default."""
    fallback = get_default_exception_reporter_filter()
    return getattr(request, 'exception_reporter_filter', fallback)
class ExceptionReporterFilter(object):
    """
    Base for all exception reporter filter classes. All overridable hooks
    contain lenient default behaviors (nothing is hidden by default).
    """

    def get_request_repr(self, request):
        if request is None:
            return repr(None)
        return build_request_repr(request, POST_override=self.get_post_parameters(request))

    def get_post_parameters(self, request):
        return {} if request is None else request.POST

    def get_traceback_frame_variables(self, request, tb_frame):
        return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
    """
    Use annotations made by the sensitive_post_parameters and
    sensitive_variables decorators to filter out sensitive information.
    """
    def is_active(self, request):
        """
        This filter is to add safety in production environments (i.e. DEBUG
        is False). If DEBUG is True then your site is not safe anyway.
        This hook is provided as a convenience to easily activate or
        deactivate the filter on a per request basis.
        """
        return settings.DEBUG is False
    def get_cleansed_multivaluedict(self, request, multivaluedict):
        """
        Replaces the keys in a MultiValueDict marked as sensitive with stars.
        This mitigates leaking sensitive POST parameters if something like
        request.POST['nonexistent_key'] throws an exception (#21098).
        """
        sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
        if self.is_active(request) and sensitive_post_parameters:
            # Work on a copy so the request's own MultiValueDict is untouched.
            multivaluedict = multivaluedict.copy()
            for param in sensitive_post_parameters:
                if param in multivaluedict:
                    multivaluedict[param] = CLEANSED_SUBSTITUTE
        return multivaluedict
    def get_post_parameters(self, request):
        """
        Replaces the values of POST parameters marked as sensitive with
        stars (*********).
        """
        if request is None:
            return {}
        else:
            sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
            if self.is_active(request) and sensitive_post_parameters:
                cleansed = request.POST.copy()
                # The sentinel '__ALL__' means every parameter is sensitive.
                if sensitive_post_parameters == '__ALL__':
                    # Cleanse all parameters.
                    for k, v in cleansed.items():
                        cleansed[k] = CLEANSED_SUBSTITUTE
                    return cleansed
                else:
                    # Cleanse only the specified parameters.
                    for param in sensitive_post_parameters:
                        if param in cleansed:
                            cleansed[param] = CLEANSED_SUBSTITUTE
                    return cleansed
            else:
                return request.POST
    def cleanse_special_types(self, request, value):
        # Frame locals may themselves hold the request or form data; scrub
        # those container types before they are rendered on the debug page.
        if isinstance(value, HttpRequest):
            # Cleanse the request's POST parameters.
            value = self.get_request_repr(value)
        elif isinstance(value, MultiValueDict):
            # Cleanse MultiValueDicts (request.POST is the one we usually care about)
            value = self.get_cleansed_multivaluedict(request, value)
        return value
    def get_traceback_frame_variables(self, request, tb_frame):
        """
        Replaces the values of variables marked as sensitive with
        stars (*********).
        """
        # Loop through the frame's callers to see if the sensitive_variables
        # decorator was used.
        current_frame = tb_frame.f_back
        sensitive_variables = None
        while current_frame is not None:
            if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
                and 'sensitive_variables_wrapper' in current_frame.f_locals):
                # The sensitive_variables decorator was used, so we take note
                # of the sensitive variables' names.
                wrapper = current_frame.f_locals['sensitive_variables_wrapper']
                sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
                break
            current_frame = current_frame.f_back
        cleansed = {}
        if self.is_active(request) and sensitive_variables:
            if sensitive_variables == '__ALL__':
                # Cleanse all variables
                for name, value in tb_frame.f_locals.items():
                    cleansed[name] = CLEANSED_SUBSTITUTE
            else:
                # Cleanse specified variables
                for name, value in tb_frame.f_locals.items():
                    if name in sensitive_variables:
                        value = CLEANSED_SUBSTITUTE
                    else:
                        value = self.cleanse_special_types(request, value)
                    cleansed[name] = value
        else:
            # Potentially cleanse the request and any MultiValueDicts if they
            # are one of the frame variables.
            for name, value in tb_frame.f_locals.items():
                cleansed[name] = self.cleanse_special_types(request, value)
        if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
            and 'sensitive_variables_wrapper' in tb_frame.f_locals):
            # For good measure, obfuscate the decorated function's arguments in
            # the sensitive_variables decorator's frame, in case the variables
            # associated with those arguments were meant to be obfuscated from
            # the decorated function's frame.
            cleansed['func_args'] = CLEANSED_SUBSTITUTE
            cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
        return cleansed.items()
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = None
self.template_does_not_exist = False
self.loader_debug_info = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def format_path_status(self, path):
if not os.path.exists(path):
return "File does not exist"
if not os.path.isfile(path):
return "Not a file"
if not os.access(path, os.R_OK):
return "File is not readable"
return "File exists"
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
# TODO: handle multiple template engines.
template_engine = Engine.get_default()
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.loader_debug_info = []
# If Django fails in get_template_loaders, provide an empty list
# for the following loop to not fail.
try:
template_loaders = template_engine.template_loaders
except Exception:
template_loaders = []
for loader in template_loaders:
try:
source_list_func = loader.get_template_sources
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{
'name': t,
'status': self.format_path_status(t),
} for t in source_list_func(str(self.exc_value))]
except AttributeError:
template_list = []
loader_name = loader.__module__ + '.' + loader.__class__.__name__
self.loader_debug_info.append({
'loader': loader_name,
'templates': template_list,
})
if (template_engine.debug and
hasattr(self.exc_value, 'django_template_source')):
self.get_template_exception_info()
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
# The force_escape filter assume unicode, make sure that works
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, force_escape(v)))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': datetime.datetime.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def get_template_exception_info(self):
origin, (start, end) = self.exc_value.django_template_source
template_source = origin.reload()
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(template_source)):
if start >= upto and end <= next:
line = num
before = escape(template_source[upto:start])
during = escape(template_source[start:end])
after = escape(template_source[end:next])
source_lines.append((num, escape(template_source[upto:next])))
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases, exc_value.args might be empty.
try:
message = self.exc_value.args[0]
except IndexError:
message = '(Could not get exception message)'
self.template_info = {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': origin.name,
}
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
frames = []
tb = self.tb
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is not None:
frames.append({
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
tb = tb.tb_next
return frames
def format_exception(self):
    """
    Return the same data as from traceback.format_exception.

    Produces a list of newline-terminated strings: the
    "Traceback (most recent call last):" header, one formatted entry per
    visible frame (as produced by get_traceback_frames(), so hidden or
    unresolvable frames are excluded), and the formatted exception itself.
    """
    import traceback
    frames = self.get_traceback_frames()
    tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
    # Fix: the original local was named ``list``, shadowing the builtin
    # for the rest of the function body.
    lines = ['Traceback (most recent call last):\n']
    lines += traceback.format_list(tb)
    lines += traceback.format_exception_only(self.exc_type, self.exc_value)
    return lines
def technical_404_response(request, exception):
    """Create a technical 404 error response. The exception should be the Http404.

    The Http404 may carry a dict payload holding the requested path and the
    URL patterns that were attempted; both are recovered defensively since
    the exception can also be raised with a plain message (or nothing).
    """
    try:
        error_url = exception.args[0]['path']
    except (IndexError, TypeError, KeyError):
        error_url = request.path_info[1:]  # Trim leading slash

    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        # A bare default project -- only the admin pattern, requested at
        # "/" -- gets the friendly welcome page instead of a technical 404.
        default_admin_only = (
            request.path == '/'
            and len(tried) == 1
            and len(tried[0]) == 1
            and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
        )
        if not tried or default_admin_only:
            return default_urlconf(request)

    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    # Work out which view raised the 404, when the path resolves at all.
    caller = ''
    try:
        resolver_match = resolve(request.path)
    except Resolver404:
        pass
    else:
        view = resolver_match.func
        if hasattr(view, '__name__'):
            caller = view.__name__
        elif hasattr(view, '__class__') and hasattr(view.__class__, '__name__'):
            caller = view.__class__.__name__
        if hasattr(view, '__module__'):
            caller = '%s.%s' % (view.__module__, caller)

    template = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
    context = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': error_url,
        'urlpatterns': tried,
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
        'raising_view_name': caller,
    })
    return HttpResponseNotFound(template.render(context), content_type='text/html')
def default_urlconf(request):
    """Create an empty URLconf 404 error response.

    Renders the "It worked!" welcome page shown when a fresh project has no
    URL patterns configured yet.
    """
    context = Context({
        "title": _("Welcome to Django"),
        "heading": _("It worked!"),
        "subheading": _("Congratulations on your first Django-powered page."),
        "instructions": _("Of course, you haven't actually done any work yet. "
            "Next, start your first app by running <code>python manage.py startapp [app_label]</code>."),
        "explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your "
            "Django settings file and you haven't configured any URLs. Get to work!"),
    })
    template = Template(DEFAULT_URLCONF_TEMPLATE, name='Default URLconf template')
    return HttpResponse(template.render(context), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>
{% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ t.status }})</li>{% endfor %}
</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>
{{ template_info.before }}
<span class="specific">{{ template_info.during }}</span>
{{ template_info.after }}
</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""")
# Plain-text variant of the technical 500 report.  It references the same
# context variables as TECHNICAL_500_TEMPLATE above; the view that chooses
# between the two renderings is outside this chunk.
TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
"""
# HTML page rendered by technical_404_response() above: shows the request,
# the view that raised (when resolvable), and the URL patterns that were
# tried when DEBUG is on.
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
# "It worked!" welcome page rendered by default_urlconf() above when a fresh
# project has no URL patterns configured.
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
| {
"content_hash": "499c40a3684556101bc3ad5196fbe0d4",
"timestamp": "",
"source": "github",
"line_count": 1264,
"max_line_length": 124,
"avg_line_length": 36.9248417721519,
"alnum_prop": 0.5714867268013627,
"repo_name": "gdi2290/django",
"id": "09cc5c462efce689da7d9033630c8ab3324ec763",
"size": "46673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/views/debug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10365282"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
import os, tweepy, time, sys, json, requests, random, imp, datetime, schedule, time, random
def twit_auth():
    """Build an authenticated tweepy API handle from environment credentials.

    Reads CONSUMER_KEY/CONSUMER_SECRET (application) and
    ACCESS_KEY/ACCESS_SECRET (account) from the environment -- a missing
    variable raises KeyError.  Intended to be called once at start-up.
    """
    names = ('CONSUMER_KEY', 'CONSUMER_SECRET', 'ACCESS_KEY', 'ACCESS_SECRET')
    creds = {name: os.environ[name] for name in names}
    handler = tweepy.OAuthHandler(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'])
    handler.set_access_token(creds['ACCESS_KEY'], creds['ACCESS_SECRET'])
    api = tweepy.API(handler)
    print('Twitter authenticated \n')
    return api
def check_neotoma():
    """Fetch recent Neotoma uploads and merge them into the tweet queue.

    Inputs:
      1. 'old_results.json' - records that have already been tweeted.
      2. 'to_print.json'    - records currently queued for tweeting.
      3. The Neotoma "RecentUploads" API - records added in the last month.

    New API records (neither tweeted nor already queued) are appended after
    the surviving queue entries, and the queue is written back to
    'to_print.json'.  'old_results.json' is only rewritten when a tweet
    actually goes out (see post_tweet).

    Returns the change in queue length (number of records added, minus any
    queued records dropped because they were already tweeted).
    """
    with open('old_results.json', 'r') as old_file:
        old_calls = json.loads(old_file.read())
    with open('to_print.json', 'r') as print_file:
        to_print = json.loads(print_file.read())
    response = requests.get("http://ceiwin10.cei.psu.edu/NDB/RecentUploads?months=1")
    recent = json.loads(response.text)['data']

    def get_datasets(records):
        # Pull the numeric DatasetID out of each record.
        return [record["DatasetID"] for record in records]

    old_datasets = set(get_datasets(old_calls))
    # Drop API records that have already been tweeted.
    # BUG FIX: the original loop ran ``range(len(...)-1, 0, -1)`` and never
    # examined index 0, so one already-tweeted record could be re-queued.
    recent = [record for record in recent
              if record["DatasetID"] not in old_datasets]
    recent_datasets = set(get_datasets(recent))
    # Keep queued records that are neither tweeted yet nor re-delivered by
    # the API, and keep them at the front so they are tweeted first.
    # BUG FIX: the original loop ran ``range(0, len(...)-1)`` and skipped the
    # last queued record, silently dropping it from the queue.  (It also
    # insert(0, ...)-ed each survivor, reversing their order.)
    carried_over = [record for record in to_print
                    if record["DatasetID"] not in old_datasets
                    and record["DatasetID"] not in recent_datasets]
    queue = carried_over + recent
    with open('to_print.json', 'w') as print_file:
        json.dump(queue, print_file)
    return len(queue) - len(to_print)
def print_neotoma_update(api):
    """Tweet a status summary: queue depth plus how many sites are new.

    Calls check_neotoma() to refresh the on-disk queue, then composes a
    message whose wording agrees in number with the count of newly added
    sites (0, 1, or many) and posts it via the supplied tweepy API handle.
    """
    # Refresh 'to_print.json' and learn how many records were just added.
    newly_added = check_neotoma()
    with open('to_print.json', 'r') as print_file:
        to_print = json.loads(print_file.read())
    # NOTE(review): the original also read 'old_results.json' into an unused
    # variable; that wasted read has been removed.
    print('Neotoma dataset updated.\n')
    if newly_added == 1:
        # Singular wording when exactly one site has been added.
        line = "I've got a backlog of " + str(len(to_print)) + " sites to tweet and " + str(newly_added) + " site has been added since I last checked Neotoma. http://neotomadb.org"
    elif newly_added > 1:
        line = "I've got a backlog of " + str(len(to_print)) + " sites to tweet and " + str(newly_added) + " sites have been added since I last checked Neotoma. http://neotomadb.org"
    else:
        line = "I've got a backlog of " + str(len(to_print)) + " sites to tweet. Nothing new has been added since I last checked. http://neotomadb.org"
    try:
        # BUG FIX: the original printed the status line twice (once before
        # the try block and once inside it); print it exactly once.
        print('%s' % line)
        api.update_status(status=line)
    except tweepy.error.TweepError:
        print("Twitter error raised")
def post_tweet(api):
    """Tweet one randomly chosen queued site and move it to the tweeted log.

    Picks a random record from 'to_print.json', builds an announcement line
    (progressively shortened when too long for a tweet), posts it, and on
    success moves the record from 'to_print.json' to 'old_results.json'.
    On a tweepy error nothing is moved, so the record will be retried.
    """
    with open('to_print.json', 'r') as print_file:
        to_print = json.loads(print_file.read())
    with open('old_results.json', 'r') as print_file:
        old_files = json.loads(print_file.read())
    print('Files opened\n')
    # Robustness fix: an empty queue made randint(0, -1) raise ValueError.
    if not to_print:
        print('No sites queued to tweet.\n')
        return
    pr_tw = random.randint(0, len(to_print) - 1)
    site = to_print[pr_tw]
    weblink = 'http://apps.neotomadb.org/Explorer/?datasetid=' + str(site["DatasetID"])
    line = 'Neotoma welcomes ' + site["SiteName"] + ', a ' + site["DatasetType"] + ' dataset by ' + site["Investigator"] + " " + weblink
    # Long lines are shortened in two steps: first drop the dataset type,
    # then clip a multi-author investigator list to "<first author> et al.".
    if len(line) > 170:
        line = 'Neotoma welcomes ' + site["SiteName"] + " by " + site["Investigator"] + " " + weblink
    # BUG FIX: the original tested ``len(line) > 170 & find(...)`` -- bitwise
    # ``&`` binds tighter than ``>``, so the comparison was against
    # ``170 & find(...)`` rather than 170.  It also treated find()'s -1
    # "not found" result as true, and clipped ``to_print[0]`` instead of the
    # randomly chosen ``site``.
    if len(line) > 170 and site["Investigator"].find(',') != -1:
        author = site["Investigator"][0:site["Investigator"].find(',')]
        line = 'Neotoma welcomes ' + site["SiteName"] + " by " + author + " et al. " + weblink
    try:
        print('%s' % line)
        api.update_status(status=line)
        # Only after a successful tweet: move the record to the tweeted log.
        old_files.append(site)
        del to_print[pr_tw]
        with open('to_print.json', 'w') as print_file:
            json.dump(to_print, print_file)
        with open('old_results.json', 'w') as print_file:
            json.dump(old_files, print_file)
    except tweepy.error.TweepError:
        print("Twitter error raised")
def self_identify(api):
    """Tweet attribution for the bot's human maintainer.

    Tweepy failures are reported to stdout and otherwise swallowed.
    """
    line = 'This twitter bot for the Neotoma Paleoecological Database is managed by @sjgoring. Letting you know what\'s new at http://neotomadb.org'
    print('%s' % line)
    try:
        api.update_status(status=line)
    except tweepy.error.TweepError:
        print("Twitter error raised")
def self_identify_hub(api):
    """Tweet a pointer to the bot's public source code on GitHub.

    Tweepy failures are reported to stdout and otherwise swallowed.
    """
    line = 'This twitter bot for the Neotoma Paleoecological Database is programmed in #python and publicly available through an MIT License on GitHub: https://github.com/SimonGoring/neotomabot'
    print('%s' % line)
    try:
        api.update_status(status=line)
    except tweepy.error.TweepError:
        print("Twitter error raised")
def other_inf_hub(api):
    """Post one randomly chosen informational tweet about Neotoma.

    Bug fixes relative to the original:
    - ``line[random.randint(0, len(line))]`` could index one past the end
      (``randint`` is inclusive at both endpoints), raising IndexError on
      roughly 1 in 21 calls.  ``random.choice`` cannot go out of range.
    - The tweet is now selected *before* printing, so the console log shows
      the tweet actually posted instead of the entire candidate list.
    """
    # Pool of informational tweets; text kept byte-identical to the originals.
    lines = ['The bot for the Neotoma Database is programmed in #python and publicly available through an MIT License on GitHub: https://github.com/SimonGoring/neotomabot',
             'Neotoma has teaching modules you can use in the class room, check it out: https://www.neotomadb.org/education/category/higher_ed/',
             'The governance for Neotoma includes representatives from our constituent databases. Find out more: https://www.neotomadb.org/about/category/governance',
             'We are invested in #cyberinfrastructure. Our response to emerging challenges is posted on @authorea: https://www.authorea.com/users/152134/articles/165940-cyberinfrastructure-in-the-paleosciences-mobilizing-long-tail-data-building-distributed-community-infrastructure-empowering-individual-geoscientists',
             'We keep a list of all publications that have used Neotoma for their research. Want to be added? Contact us! https://www.neotomadb.org/references',
             'These days everyone\'s got a Google Scholar page. So does Neotoma! https://scholar.google.ca/citations?user=idoixqkAAAAJ&hl=en',
             'If you use #rstats then you can access Neotoma data directly thanks to @rOpenSci! https://ropensci.org/tutorials/neotoma_tutorial.html',
             'Neotoma is more than just pollen & mammals; it contains 28 data types incl phytoliths & biochemistry data. Explore! https://www.neotomadb.org/data/category/explorer',
             'Think you\'ve got better tweets? Add them to my code & make a pull request! https://github.com/SimonGoring/neotomabot',
             'Behold, the very first Neotoma dataset, ID 1: https://apps.neotomadb.org/explorer/?datasetid=1',
             'We\'ve got some new R tutorials up online. Is there anything you\'d like to do with Neotoma? http://neotomadb.github.io',
             'Neotoma is a member of the @ICSU_WDS, working to share best practices for data stewardship.',
             'Are you presenting at an upcoming meeting? Will you be talking about Neotoma? Let us know and we can help get the word out! Contact @sjgoring',
             'You know you want to slide into these mentions. . . Let us know what cool #pollen, #paleoecology, #archaeology, #whatever you\'re doing with Neotoma data!',
             'Referencing Neotoma? Why not check out our Quaternary Research paper? https://doi.org/10.1017/qua.2017.105',
             'How is Neotoma leveraging text mining to improve its data holdings? Find out on the @earthcube blog: https://earthcube.wordpress.com/2018/03/06/geodeepdive-into-darkdata/',
             "Building an application that could leverage Neotoma data? Our API (https://api-dev.neotomadb.org) is public and open: https://github.com/NeotomaDB/api_nodetest/",
             "The landing pages for Neotoma were built using Vue.js, all code is published on Github at https://github.com/NeotomaDB/ndbLandingPage",
             "Learn more about how Neotoma makes the most of teaching and cutting-edge research in a new publication in Elements of Paleontology: http://dx.doi.org/10.1017/9781108681582",
             "Neotoma is on Slack. Come join the discussion and get involved! We're looking for folks to help with documentation, stewardship and coding. https://join.slack.com/t/neotomadb/shared_invite/zt-cvsv53ep-wjGeCTkq7IhP6eUNA9NxYQ"
             ]
    tweet = random.choice(lines)
    try:
        print('%s' % tweet)
        api.update_status(status=tweet)
    except tweepy.error.TweepError:
        # Posting is best-effort; a failed tweet just gets logged.
        print("Twitter error raised")
# Authenticate with Twitter once; every scheduled job reuses this handle.
api = twit_auth()
# Recurring jobs: new-dataset content every 3 hours, plus daily/weekly notices.
schedule.every(3).hours.do(post_tweet, api)
schedule.every().day.at("15:37").do(print_neotoma_update, api)
schedule.every().wednesday.at("14:30").do(self_identify, api)
schedule.every().monday.at("14:30").do(self_identify_hub, api)
schedule.every().day.at("10:30").do(other_inf_hub, api)
# Main loop: poll the scheduler roughly once per minute, forever.
while 1:
    schedule.run_pending()
    time.sleep(61)
| {
"content_hash": "0e52ab678e7fe1a456418d0d8f470f00",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 316,
"avg_line_length": 49.751196172248804,
"alnum_prop": 0.6847470667436045,
"repo_name": "SimonGoring/NeotomaBot",
"id": "8381a6213cc3d3908ef643042504dbfa7fccd3b8",
"size": "10455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neotomabot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "12648"
},
{
"name": "Python",
"bytes": "10219"
}
],
"symlink_target": ""
} |
from graphviz import Digraph
import argparse
import os
import pydot
import sys
import warnings
def gen_graph_from_gv(ifile, odir, oformat="png"):
    """Render a Graphviz .gv source file to an image file.

    :param ifile: path to the input Graphviz (dot) file
    :param odir: directory to write the rendered image into
    :param oformat: output image format supported by pydot (default "png")
    """
    (graph,) = pydot.graph_from_dot_file(ifile)
    # pydot exposes one writer method per format, e.g. write_png, write_svg.
    gen_graph_func = getattr(graph, "write_" + oformat)
    # Keep the input's base name, swapping its extension for the new format.
    filename = os.path.basename(ifile)
    stem = os.path.splitext(filename)[0]
    # os.path.join instead of string concatenation for portability.
    ofile = os.path.join(odir, stem + "." + oformat)
    gen_graph_func(ofile)
# Command-line interface: render one or more .gv files to images.
# (Help text fixed: the original carried copy-pasted argparse-example strings
# about "summing integers".)
parser = argparse.ArgumentParser(
    description='Generate images from Graphviz source files.')
parser.add_argument('-i', "--infile", action="append",
                    help="graphviz file path")
parser.add_argument('-o', '--outdir',
                    help='output directory for the generated images')
parser.add_argument('-f', '--outformat', default="png",
                    help='output image format (default: png)')
args = parser.parse_args()
# Image source directory (the directory this script lives in).
img_src_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
img_files = []
if args.infile:
    # Explicit inputs: accept absolute paths or paths relative to the script.
    for f in args.infile:
        if not os.path.isfile(f):
            f = img_src_dir + "/" + f
        if not os.path.isfile(f):
            warnings.warn("Input file: " + f + " doesn't exist.")
        else:
            img_files.append(f)
else:
    # No inputs given: render every .gv file that sits next to this script.
    for f in os.listdir(img_src_dir):
        if f.endswith(".gv"):
            img_files.append(img_src_dir + "/" + f)
if not img_files:
    # BUG FIX: was "sys.exist", which raised AttributeError instead of
    # printing the message and exiting with a non-zero status.
    sys.exit("ERROR: no found image files.")
oformat = args.outformat
if args.outdir:
    odir = args.outdir
    if not os.path.isdir(odir):
        # BUG FIX: added the missing space before "doesn't exist".
        sys.exit("--outdir " + odir + " doesn't exist")
else:
    # Default output directory: sibling "img" directory of the script's parent.
    odir = os.path.dirname(img_src_dir) + "/img"
for f in img_files:
    print("Generating " + oformat + " for " + f + " ...")
    gen_graph_from_gv(f, odir, oformat)
| {
"content_hash": "5a780901d5c181f17c943689c5ebb381",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 70,
"avg_line_length": 30.071428571428573,
"alnum_prop": 0.6098574821852731,
"repo_name": "explora26/zephyr",
"id": "28871b4a295f34b8ee7f70fb9d52cf32705b2018",
"size": "1684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ext/lib/ipc/open-amp/open-amp/docs/img-src/gen-graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1293047"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "340251497"
},
{
"name": "C++",
"bytes": "3179665"
},
{
"name": "CMake",
"bytes": "531524"
},
{
"name": "EmberScript",
"bytes": "793"
},
{
"name": "Makefile",
"bytes": "3313"
},
{
"name": "Objective-C",
"bytes": "34223"
},
{
"name": "Perl",
"bytes": "202106"
},
{
"name": "Python",
"bytes": "909223"
},
{
"name": "Shell",
"bytes": "42672"
},
{
"name": "Verilog",
"bytes": "6394"
}
],
"symlink_target": ""
} |
"""Self-test suite for Crypto.Hash.keccak"""
import unittest
from binascii import hexlify, unhexlify
from Crypto.SelfTest.loader import load_tests
from Crypto.SelfTest.st_common import list_test_cases
from StringIO import StringIO
from Crypto.Hash import keccak
from Crypto.Util.py3compat import b, tobytes, bchr
class KeccakTest(unittest.TestCase):
    """API-level tests for Crypto.Hash.keccak (construction, update, digest).

    Modernized: the deprecated unittest aliases ``failUnless`` and
    ``assertEquals`` are replaced by ``assertTrue``/``assertEqual``
    (the aliases are removed in recent Python versions).
    """

    def test_new_positive(self):
        # Both the bit-oriented and byte-oriented constructors must agree
        # on the resulting digest size, including for cloned objects.
        for digest_bits in (224, 256, 384, 512):
            hobj = keccak.new(digest_bits=digest_bits)
            self.assertEqual(hobj.digest_size, digest_bits // 8)

            hobj2 = hobj.new()
            self.assertEqual(hobj2.digest_size, digest_bits // 8)

        for digest_bytes in (28, 32, 48, 64):
            hobj = keccak.new(digest_bytes=digest_bytes)
            self.assertEqual(hobj.digest_size, digest_bytes)

            hobj2 = hobj.new()
            self.assertEqual(hobj2.digest_size, digest_bytes)

    def test_new_positive2(self):
        # Passing data to the constructor equals constructing then updating.
        digest1 = keccak.new(data=b("\x90"), digest_bytes=64).digest()
        digest2 = keccak.new(digest_bytes=64).update(b("\x90")).digest()
        self.assertEqual(digest1, digest2)

    def test_new_negative(self):
        # keccak.new needs digest size
        self.assertRaises(TypeError, keccak.new)

        h = keccak.new(digest_bits=512)

        # Either bits or bytes can be specified
        self.assertRaises(TypeError, keccak.new,
                          digest_bytes=64,
                          digest_bits=512)

        # Range
        self.assertRaises(ValueError, keccak.new, digest_bytes=0)
        self.assertRaises(ValueError, keccak.new, digest_bytes=1)
        self.assertRaises(ValueError, keccak.new, digest_bytes=65)
        self.assertRaises(ValueError, keccak.new, digest_bits=0)
        self.assertRaises(ValueError, keccak.new, digest_bits=1)
        self.assertRaises(ValueError, keccak.new, digest_bits=513)

    def test_update(self):
        # Feeding data in two chunks equals feeding it all at once.
        pieces = [bchr(10) * 200, bchr(20) * 300]
        h = keccak.new(digest_bytes=64)
        h.update(pieces[0]).update(pieces[1])
        digest = h.digest()

        h = keccak.new(digest_bytes=64)
        h.update(pieces[0] + pieces[1])
        self.assertEqual(h.digest(), digest)

    def test_update_negative(self):
        h = keccak.new(digest_bytes=64)
        self.assertRaises(TypeError, h.update, u"string")

    def test_digest(self):
        h = keccak.new(digest_bytes=64)
        digest = h.digest()

        # hexdigest does not change the state
        self.assertEqual(h.digest(), digest)
        # digest returns a byte string
        self.assertTrue(isinstance(digest, type(b("digest"))))

    def test_hex_digest(self):
        mac = keccak.new(digest_bits=512)
        digest = mac.digest()
        hexdigest = mac.hexdigest()

        # hexdigest is equivalent to digest
        self.assertEqual(hexlify(digest), tobytes(hexdigest))
        # hexdigest does not change the state
        self.assertEqual(mac.hexdigest(), hexdigest)
        # hexdigest returns a string
        self.assertTrue(isinstance(hexdigest, type("digest")))

    def test_update_after_digest(self):
        msg = b("rrrrttt")

        # Normally, update() cannot be done after digest()
        h = keccak.new(digest_bits=512, data=msg[:4])
        dig1 = h.digest()
        self.assertRaises(TypeError, h.update, msg[4:])
        dig2 = keccak.new(digest_bits=512, data=msg).digest()

        # With the proper flag, it is allowed
        h = keccak.new(digest_bits=512, data=msg[:4], update_after_digest=True)
        self.assertEqual(h.digest(), dig1)
        # ... and the subsequent digest applies to the entire message
        # up to that point
        h.update(msg[4:])
        self.assertEqual(h.digest(), dig2)
class KeccakVectors(unittest.TestCase):
    """Known-answer tests; test_<bits>_<n> methods are attached dynamically
    below via setattr, one per loaded test vector."""
    pass
# TODO: add ExtremelyLong tests
def _load_keccak_vectors(digest_bits):
    """Load the short- and long-message KAT vectors for one digest size.

    :param digest_bits: keccak digest size in bits (224, 256, 384 or 512)
    :return: combined list of test-vector records
    """
    dirs = ("Crypto", "SelfTest", "Hash", "test_vectors", "keccak")
    vectors = load_tests(dirs,
                         "ShortMsgKAT_%d.txt" % digest_bits,
                         "Short Messages KAT %d" % digest_bits,
                         {"len": lambda x: int(x)})
    vectors += load_tests(dirs,
                          "LongMsgKAT_%d.txt" % digest_bits,
                          "Long Messages KAT %d" % digest_bits,
                          {"len": lambda x: int(x)})
    return vectors


def _register_keccak_tests(digest_bits, test_vectors):
    """Attach one known-answer test method per vector to KeccakVectors.

    Method names match the original hand-unrolled code:
    ``test_<digest_bits>_<index>``.
    """
    for idx, tv in enumerate(test_vectors):
        if tv.len == 0:
            data = b("")
        else:
            data = tobytes(tv.msg)

        # Bind per-vector values as defaults so each generated method keeps
        # its own data/result (avoids the late-binding closure pitfall).
        def new_test(self, data=data, result=tv.md, bits=digest_bits):
            hobj = keccak.new(digest_bits=bits, data=data)
            self.assertEqual(hobj.digest(), result)

        setattr(KeccakVectors, "test_%d_%d" % (digest_bits, idx), new_test)


# The per-size vector lists stay module-level for backward compatibility.
test_vectors_224 = _load_keccak_vectors(224)
_register_keccak_tests(224, test_vectors_224)

test_vectors_256 = _load_keccak_vectors(256)
_register_keccak_tests(256, test_vectors_256)

test_vectors_384 = _load_keccak_vectors(384)
_register_keccak_tests(384, test_vectors_384)

test_vectors_512 = _load_keccak_vectors(512)
_register_keccak_tests(512, test_vectors_512)
def get_tests(config=None):
    """Return all keccak self-test cases.

    :param config: optional test-configuration mapping; unused here but
        accepted for API compatibility with the other self-test modules.
        (Was a mutable default ``{}``; now None to follow best practice —
        behavior is unchanged since the argument is never read.)
    :return: list of unittest test cases
    """
    tests = []
    tests += list_test_cases(KeccakTest)
    tests += list_test_cases(KeccakVectors)
    return tests
if __name__ == '__main__':
    import unittest  # redundant with the top-level import; kept as-is
    # unittest.main looks the callable up by name in this module's globals.
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
| {
"content_hash": "a843a6ad8451f522970118c574b018e0",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 88,
"avg_line_length": 34.502262443438916,
"alnum_prop": 0.5589508196721311,
"repo_name": "ininex/geofire-python",
"id": "4fce1782666b62940b9d0f65857c8b9d8725f82b",
"size": "9138",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "resource/lib/python2.7/site-packages/Crypto/SelfTest/Hash/test_keccak.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6231"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Protocol Buffer",
"bytes": "158375"
},
{
"name": "Python",
"bytes": "13368780"
},
{
"name": "Shell",
"bytes": "5031"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from pixelpuncher.enemy.models import EnemyCategory, EnemyType, Enemy, EnemySpawn
class EnemyCategoryModelAdmin(admin.ModelAdmin):
    """Admin changelist configuration for EnemyCategory."""
    list_display = ("code", "name", )
    search_fields = ("code", "name", )
    list_per_page = 25
class EnemyTypeCategoryModelAdmin(admin.ModelAdmin):
    """Admin changelist configuration for EnemyType."""
    list_display = ("id", "name", "category", "xp", "base_level", "maximum_health", "date_created",)
    search_fields = ("id", "name", "category", "xp", "base_level", "date_created", )
    list_per_page = 50
class EnemyModelAdmin(admin.ModelAdmin):
    """Admin changelist configuration for Enemy instances."""
    list_display = ("id", "enemy_type", "active", "player", "current_health", "date_created",)
    search_fields = ("id", "current_health", "active", "date_created",)
    list_per_page = 50
class EnemySpawnModelAdmin(admin.ModelAdmin):
    """Admin changelist configuration for EnemySpawn."""
    list_display = ("id", "enemy_type", "location", "spawn_rate", )
    search_fields = ("id", "enemy_type", "location", "spawn_rate",)
    list_per_page = 50
# Register each enemy model with its admin configuration.
admin.site.register(EnemyCategory, EnemyCategoryModelAdmin)
admin.site.register(EnemyType, EnemyTypeCategoryModelAdmin)
admin.site.register(Enemy, EnemyModelAdmin)
admin.site.register(EnemySpawn, EnemySpawnModelAdmin)
| {
"content_hash": "73adfa362430e5ead51ad866e067143f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 100,
"avg_line_length": 37.1875,
"alnum_prop": 0.7,
"repo_name": "ej2/pixelpuncher",
"id": "8efc939a20d4172e3cbbacf02c693b332e3fe173",
"size": "1190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pixelpuncher/enemy/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "155880"
},
{
"name": "HTML",
"bytes": "108414"
},
{
"name": "JavaScript",
"bytes": "29178"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "282954"
},
{
"name": "Shell",
"bytes": "4535"
}
],
"symlink_target": ""
} |
import gym
from random import choice
import unittest
import ray
import ray.rllib.agents.a3c as a3c
import ray.rllib.agents.dqn as dqn
import ray.rllib.agents.pg as pg
from ray.rllib.examples.env.multi_agent import MultiAgentCartPole
from ray.rllib.utils.test_utils import framework_iterator
class TestTrainer(unittest.TestCase):
    """Integration tests for RLlib Trainer features: dynamic policy
    add/remove and the evaluation-worker machinery."""
    @classmethod
    def setUpClass(cls):
        # One shared Ray cluster for the whole class; torn down at the end.
        ray.init()
    @classmethod
    def tearDownClass(cls):
        ray.shutdown()
    def test_add_delete_policy(self):
        """Add policies p1..p3 one by one during training, then remove them."""
        env = gym.make("CartPole-v0")
        config = pg.DEFAULT_CONFIG.copy()
        config.update({
            "env": MultiAgentCartPole,
            "env_config": {
                "config": {
                    "num_agents": 4,
                },
            },
            "multiagent": {
                # Start with a single policy.
                "policies": {
                    "p0": (None, env.observation_space, env.action_space, {}),
                },
                "policy_mapping_fn": lambda aid, episode, **kwargs: "p0",
            },
        })
        for _ in framework_iterator(config):
            trainer = pg.PGTrainer(config=config)
            r = trainer.train()
            self.assertTrue("p0" in r["policy_reward_min"])
            for i in range(1, 4):
                # Map each agent randomly to one of the two newest policies.
                def new_mapping_fn(agent_id, episode, **kwargs):
                    return f"p{choice([i, i - 1])}"
                # Add a new policy.
                new_pol = trainer.add_policy(
                    f"p{i}",
                    trainer._policy_class,
                    observation_space=env.observation_space,
                    action_space=env.action_space,
                    config={},
                    # Test changing the mapping fn.
                    policy_mapping_fn=new_mapping_fn,
                    # Change the list of policies to train.
                    policies_to_train=[f"p{i}", f"p{i-1}"],
                )
                pol_map = trainer.workers.local_worker().policy_map
                # add_policy must return a distinct policy object and grow
                # the local worker's policy map by exactly one entry.
                self.assertTrue(new_pol is not trainer.get_policy("p0"))
                self.assertTrue("p0" in pol_map)
                self.assertTrue("p1" in pol_map)
                self.assertTrue(len(pol_map) == i + 1)
                r = trainer.train()
                self.assertTrue("p1" in r["policy_reward_min"])
            # Delete all added policies again from trainer.
            for i in range(3, 0, -1):
                trainer.remove_policy(
                    f"p{i}",
                    policy_mapping_fn=lambda aid, eps, **kwargs: f"p{i - 1}",
                    policies_to_train=[f"p{i - 1}"])
            trainer.stop()
    def test_evaluation_option(self):
        """evaluation_interval=2: eval metrics appear only every 2nd train()."""
        config = dqn.DEFAULT_CONFIG.copy()
        config.update({
            "env": "CartPole-v0",
            "evaluation_interval": 2,
            "evaluation_num_episodes": 2,
            # Eval workers run with a different gamma so their results are
            # distinguishable from training results.
            "evaluation_config": {
                "gamma": 0.98,
            }
        })
        for _ in framework_iterator(config, frameworks=("tf", "torch")):
            trainer = dqn.DQNTrainer(config=config)
            # Given evaluation_interval=2, r0, r2, r4 should not contain
            # evaluation metrics, while r1, r3 should.
            r0 = trainer.train()
            print(r0)
            r1 = trainer.train()
            print(r1)
            r2 = trainer.train()
            print(r2)
            r3 = trainer.train()
            print(r3)
            trainer.stop()
            self.assertFalse("evaluation" in r0)
            self.assertTrue("evaluation" in r1)
            self.assertFalse("evaluation" in r2)
            self.assertTrue("evaluation" in r3)
            self.assertTrue("episode_reward_mean" in r1["evaluation"])
            # Two separate evaluation runs should not yield identical stats.
            self.assertNotEqual(r1["evaluation"], r3["evaluation"])
    def test_evaluation_wo_evaluation_worker_set(self):
        """evaluate() requires an eval worker set unless create_env_on_driver
        is set, which puts an env on the local worker instead."""
        config = a3c.DEFAULT_CONFIG.copy()
        config.update({
            "env": "CartPole-v0",
            # Switch off evaluation (this should already be the default).
            "evaluation_interval": None,
        })
        for _ in framework_iterator(frameworks=("tf", "torch")):
            # Setup trainer w/o evaluation worker set and still call
            # evaluate() -> Expect error.
            trainer_wo_env_on_driver = a3c.A3CTrainer(config=config)
            self.assertRaisesRegexp(
                ValueError, "Cannot evaluate w/o an evaluation worker set",
                trainer_wo_env_on_driver.evaluate)
            trainer_wo_env_on_driver.stop()
            # Try again using `create_env_on_driver=True`.
            # This force-adds the env on the local-worker, so this Trainer
            # can `evaluate` even though, it doesn't have an evaluation-worker
            # set.
            config["create_env_on_driver"] = True
            trainer_w_env_on_driver = a3c.A3CTrainer(config=config)
            results = trainer_w_env_on_driver.evaluate()
            assert "evaluation" in results
            assert "episode_reward_mean" in results["evaluation"]
            trainer_w_env_on_driver.stop()
            # Reset for the next framework iteration.
            config["create_env_on_driver"] = False
if __name__ == "__main__":
    import pytest
    import sys
    # Run this file's tests under pytest and propagate its exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"content_hash": "e09d6f1613650f15fc62f84f15bfd63f",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 78,
"avg_line_length": 36.638888888888886,
"alnum_prop": 0.525587566338135,
"repo_name": "pcmoritz/ray-1",
"id": "2e3f5afe87d7c8ebc7d4805a95f28a9774398f6a",
"size": "5276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/agents/tests/test_trainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from concurrent.futures import as_completed
from ..maya import Executor
def get_type(node):
    """Return ``(node, node_type)`` for the given Maya node name."""
    # Imported lazily: maya.cmds only exists inside a Maya session, and this
    # function is shipped to the farm to run there.
    from maya import cmds
    node_type = cmds.nodeType(node)
    return node, node_type
def submit():
    """Submit a nodeType lookup for each selected Maya node as one batch job,
    then print each job's results as they complete."""
    from maya import cmds
    # Clone the interactive session's environment so the farm workers see
    # the same Maya setup; create_tempfile packages it for transfer.
    executor = Executor(clone_environ=True, create_tempfile=True)
    with executor.batch("QBFutures Example: Get Node Types") as batch:
        # One work item per currently selected node.
        for node in cmds.ls(sl=True):
            future = batch.submit_ext(get_type, [node], name='nodeType(%r)' % node)
    # Python 2 print statement: this module targets Maya's Python 2 runtime.
    for future in as_completed(batch.futures):
        print future.job_id, future.work_id, future.result()
def fail():
    """Raise ValueError unconditionally; used to exercise error propagation
    from farm workers back to the submitter."""
    message = 'testing failure'
    raise ValueError(message)
if __name__ == '__main__':
    # Smoke test: submit fail() to the farm; calling .result() here should
    # re-raise the worker's ValueError locally.
    executor = Executor(name="Maya Exeception Test")
    executor.submit('qbfutures.test.maya:fail').result()
| {
"content_hash": "f94b5ac9b497df5121fae2504dfc617d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 83,
"avg_line_length": 25.21875,
"alnum_prop": 0.6753407682775713,
"repo_name": "westernx/qbfutures",
"id": "d1643973ce9b00bea3e5de5f37498a09ebd97395",
"size": "807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qbfutures/test/maya.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38146"
}
],
"symlink_target": ""
} |
# Generated test script: exercise SQL code generation for a
# DecisionTreeRegressor on the RandomReg_500 dataset against Oracle.
from sklearn2sql_heroku.tests.regression import generic as reg_gen


reg_gen.test_model("DecisionTreeRegressor" , "RandomReg_500" , "oracle")
| {
"content_hash": "2366d69c1d226cad6dea19bd8d016cc5",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 35.5,
"alnum_prop": 0.7816901408450704,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "0292f6192af33687b5db7bdc2826d5ff71dcd9d5",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regression/RandomReg_500/ws_RandomReg_500_DecisionTreeRegressor_oracle_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
import logging
from io import BytesIO
from .psparser import PSStackParser
from .psparser import PSSyntaxError
from .psparser import PSEOF
from .psparser import KWD
from . import settings
from .pdftypes import PDFException
from .pdftypes import PDFStream
from .pdftypes import PDFObjRef
from .pdftypes import int_value
from .pdftypes import dict_value
log = logging.getLogger(__name__)
class PDFSyntaxError(PDFException):
    """Raised when the byte stream violates PDF syntax rules (only under
    strict settings; otherwise the parser tries to recover)."""
    pass
class PDFParser(PSStackParser):
    """
    PDFParser fetch PDF objects from a file stream.
    It can handle indirect references by referring to
    a PDF document set by set_document method.
    It also reads XRefs at the end of every PDF file.
    Typical usage:
        parser = PDFParser(fp)
        parser.read_xref()
        parser.read_xref(fallback=True) # optional
        parser.set_document(doc)
        parser.seek(offset)
        parser.nextobject()
    """
    def __init__(self, fp):
        PSStackParser.__init__(self, fp)
        self.doc = None
        # fallback mode: stream lengths are recovered by scanning for
        # 'endstream' instead of trusting the /Length entry.
        self.fallback = False
        return
    def set_document(self, doc):
        """Associates the parser with a PDFDocument object."""
        self.doc = doc
        return
    # Interned keyword tokens; do_keyword compares against these by identity.
    KEYWORD_R = KWD(b'R')
    KEYWORD_NULL = KWD(b'null')
    KEYWORD_ENDOBJ = KWD(b'endobj')
    KEYWORD_STREAM = KWD(b'stream')
    KEYWORD_XREF = KWD(b'xref')
    KEYWORD_STARTXREF = KWD(b'startxref')
    def do_keyword(self, pos, token):
        """Handles PDF-related keywords."""
        if token in (self.KEYWORD_XREF, self.KEYWORD_STARTXREF):
            self.add_results(*self.pop(1))
        elif token is self.KEYWORD_ENDOBJ:
            self.add_results(*self.pop(4))
        elif token is self.KEYWORD_NULL:
            # null object
            self.push((pos, None))
        elif token is self.KEYWORD_R:
            # reference to indirect object
            try:
                # Two preceding stack entries: object id and generation no.
                ((_, objid), (_, genno)) = self.pop(2)
                (objid, genno) = (int(objid), int(genno))
                obj = PDFObjRef(self.doc, objid, genno)
                self.push((pos, obj))
            except PSSyntaxError:
                pass
        elif token is self.KEYWORD_STREAM:
            # stream object
            ((_, dic),) = self.pop(1)
            dic = dict_value(dic)
            objlen = 0
            if not self.fallback:
                # Trust the stream dictionary's /Length when not in fallback.
                try:
                    objlen = int_value(dic['Length'])
                except KeyError:
                    if settings.STRICT:
                        raise PDFSyntaxError('/Length is undefined: %r' % dic)
            self.seek(pos)
            try:
                (_, line) = self.nextline()  # 'stream'
            except PSEOF:
                if settings.STRICT:
                    raise PDFSyntaxError('Unexpected EOF')
                return
            pos += len(line)
            self.fp.seek(pos)
            # Read the declared length (0 in fallback mode; data is then
            # accumulated line by line below until 'endstream' is found).
            data = bytearray(self.fp.read(objlen))
            self.seek(pos+objlen)
            while 1:
                try:
                    (linepos, line) = self.nextline()
                except PSEOF:
                    if settings.STRICT:
                        raise PDFSyntaxError('Unexpected EOF')
                    break
                if b'endstream' in line:
                    # 'endstream' may share a line with trailing stream bytes;
                    # count (and in fallback keep) only the bytes before it.
                    i = line.index(b'endstream')
                    objlen += i
                    if self.fallback:
                        data += line[:i]
                    break
                objlen += len(line)
                if self.fallback:
                    data += line
            data = bytes(data)
            self.seek(pos+objlen)
            # XXX limit objlen not to exceed object boundary
            log.debug('Stream: pos=%d, objlen=%d, dic=%r, data=%r...', pos,
                      objlen, dic, data[:10])
            obj = PDFStream(dic, data, self.doc.decipher)
            self.push((pos, obj))
        else:
            # others
            self.push((pos, token))
        return
class PDFStreamParser(PDFParser):
    """
    PDFStreamParser is used to parse PDF content streams
    that is contained in each page and has instructions
    for rendering the page. A reference to a PDF document is
    needed because a PDF content stream can also have
    indirect references to other objects in the same document.
    """
    def __init__(self, data):
        # Content streams are already in memory; wrap the bytes in a file-like
        # object for the underlying PSStackParser machinery.
        PDFParser.__init__(self, BytesIO(data))
        return
    def flush(self):
        # Hand every parsed object over at once (content streams are small).
        self.add_results(*self.popall())
        return
    KEYWORD_OBJ = KWD(b'obj')
    def do_keyword(self, pos, token):
        if token is self.KEYWORD_R:
            # reference to indirect object
            try:
                ((_, objid), (_, genno)) = self.pop(2)
                (objid, genno) = (int(objid), int(genno))
                obj = PDFObjRef(self.doc, objid, genno)
                self.push((pos, obj))
            except PSSyntaxError:
                pass
            return
        elif token in (self.KEYWORD_OBJ, self.KEYWORD_ENDOBJ):
            if settings.STRICT:
                # See PDF Spec 3.4.6: Only the object values are stored in the
                # stream; the obj and endobj keywords are not used.
                raise PDFSyntaxError('Keyword endobj found in stream')
            return
        # others
        # Unlike PDFParser, rendering operators are pushed as-is for the
        # interpreter to consume.
        self.push((pos, token))
        return
| {
"content_hash": "6efd7916781ba1eeafe188c5c7d8b0c6",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 78,
"avg_line_length": 31.211764705882352,
"alnum_prop": 0.5358085186581228,
"repo_name": "goulu/pdfminer",
"id": "ee64c2ee42e3237ba1f5437b230ccecc9fd46c99",
"size": "5306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdfminer/pdfparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1783"
},
{
"name": "Python",
"bytes": "478562"
}
],
"symlink_target": ""
} |
from os import getenv
from unittest import TestCase
from selenium.webdriver import Remote
from horace.driver import Driver
from config import html_fixture_url
class TestDriver(TestCase):
    """Tests for horace.driver.Driver against a real (remote) browser.

    Modernized: the deprecated ``assertEquals`` alias is replaced with
    ``assertEqual``, and a misleading local name in ``test_lastElement``
    is fixed (it held the *last* element, not the first).
    """

    # Browser/platform come from the environment so CI can vary them.
    config = {
        'driver': getenv('BROWSER', 'phantomjs'),
        'platform': getenv('PLATFORM', 'ANY')
    }

    def test_default_config(self):
        d = Driver(TestDriver.config)
        self.assertTrue(d._driver)
        self.assertIsInstance(d._driver, Remote)
        d.close()

    def test_with_config(self):
        d = Driver(TestDriver.config)
        self.assertTrue(d._driver)
        self.assertEqual(d._driver.name, getenv('BROWSER', 'phantomjs'))
        d.close()

    def test_capabilities(self):
        d = Driver(TestDriver.config)
        self.assertTrue(d._driver)
        capabilities = d._driver.capabilities
        self.assertTrue(capabilities['javascriptEnabled'])
        d.close()

    def test_passthrough(self):
        # Unknown attributes should be delegated to the wrapped webdriver.
        d = Driver(TestDriver.config)
        self.assertIsInstance(d._driver, Remote)
        html = d.find_elements_by_css_selector('html')
        self.assertEqual(len(html), 1)
        d.close()

    def test_title(self):
        d = Driver(TestDriver.config)
        d.get(html_fixture_url)
        self.assertEqual(d.title, 'Horace Test Page')
        d.close()

    def test_firstElement(self):
        d = Driver(TestDriver.config)
        first = d.first_element()
        self.assertEqual(first.tag_name, 'html')
        d.close()

    def test_lastElement(self):
        d = Driver(TestDriver.config)
        last = d.last_element()
        self.assertEqual(last.tag_name, 'body')
        d.close()

    def test_allElements(self):
        d = Driver(TestDriver.config)
        allElems = d.all_elements()
        self.assertEqual(len(allElems), 3)
        d.close()
"content_hash": "9fd2d5cd31498832cd083b0d3579e724",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 73,
"avg_line_length": 29.015873015873016,
"alnum_prop": 0.6198030634573304,
"repo_name": "lawrencec/horace",
"id": "cefc383ee8810f21debfe087c1c1a76c534e3111",
"size": "1828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_driver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46364"
}
],
"symlink_target": ""
} |
from .get_handler import get_handler_class, get_handler # noqa
from .handler_shortcuts import i18n_reverse, transform_url_to_languagecode # noqa
from .i18n_urlpatterns import i18n_patterns, I18nLocalePrefixPattern # noqa
| {
"content_hash": "2140d73b5280cb02b2ac28ba42d5d7ee",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 82,
"avg_line_length": 74.66666666666667,
"alnum_prop": 0.8035714285714286,
"repo_name": "appressoas/ievv_opensource",
"id": "ecff829ffdeba21b5534ff870f0eeaede6e26716",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ievv_opensource/ievv_i18n_url/i18n_url_utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "199"
},
{
"name": "Dockerfile",
"bytes": "162"
},
{
"name": "HTML",
"bytes": "7544"
},
{
"name": "JavaScript",
"bytes": "719"
},
{
"name": "Less",
"bytes": "27"
},
{
"name": "Python",
"bytes": "614046"
},
{
"name": "SCSS",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "141"
},
{
"name": "TypeScript",
"bytes": "254"
}
],
"symlink_target": ""
} |
from heatclient.common import base
from heatclient.v1 import stacks
DEFAULT_PAGE_SIZE = 20
class Event(base.Resource):
    """A single Heat stack event, as returned by the events API."""
    def __repr__(self):
        return "<Event %s>" % self._info
    def update(self, **fields):
        # Delegates to the manager; returns None (unlike delete below).
        self.manager.update(self, **fields)
    def delete(self):
        return self.manager.delete(self)
    def data(self, **kwargs):
        return self.manager.data(self, **kwargs)
class EventManager(stacks.StackChildManager):
    # All list/get results are wrapped as Event instances.
    resource_class = Event
    def list(self, stack_id, resource_name=None):
        """Get a list of events.
        :param stack_id: ID of stack the events belong to
        :param resource_name: Optional name of resources to filter events by
        :rtype: list of :class:`Event`
        """
        if resource_name is None:
            # NOTE(review): unlike the branch below, stack_id is NOT passed
            # through _resolve_stack_id here -- presumably the events URL
            # accepts the raw identifier; confirm against the Heat API.
            url = '/stacks/%s/events' % stack_id
        else:
            stack_id = self._resolve_stack_id(stack_id)
            url = '/stacks/%s/resources/%s/events' % (stack_id, resource_name)
        return self._list(url, "events")
    def get(self, stack_id, resource_name, event_id):
        """Get the details for a specific event.
        :param stack_id: ID of stack containing the event
        :param resource_name: ID of resource the event belongs to
        :param event_id: ID of event to get the details for
        """
        stack_id = self._resolve_stack_id(stack_id)
        url_str = '/stacks/%s/resources/%s/events/%s' % (stack_id,
                                                         resource_name,
                                                         event_id)
        resp, body = self.api.json_request('GET', url_str)
        return Event(self, body['event'])
| {
"content_hash": "28890390a7eef240559131e1bef1058f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 34.40816326530612,
"alnum_prop": 0.5747330960854092,
"repo_name": "neumerance/cloudloon2",
"id": "85f43b9a3b3662a8526ea7a8c250ad453c3425ef",
"size": "2316",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/heatclient/v1/events.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "178040"
},
{
"name": "JavaScript",
"bytes": "460971"
},
{
"name": "Perl",
"bytes": "1954"
},
{
"name": "Python",
"bytes": "3227734"
},
{
"name": "Ruby",
"bytes": "76"
},
{
"name": "Shell",
"bytes": "14108"
}
],
"symlink_target": ""
} |
"Use 2.X/3.X keyword args deletion with defaults"
import sys
def print3(*args, **kargs):
    """Emulate the Python 3 print function with sep/end/file keywords.

    Unknown keyword arguments raise TypeError, mirroring print().
    """
    sep = kargs.pop('sep', ' ')
    end = kargs.pop('end', '\n')
    file = kargs.pop('file', sys.stdout)
    if kargs:
        raise TypeError('extra keywords: %s' % kargs)
    # Join all positional args with the separator in one pass.
    file.write(sep.join(str(arg) for arg in args) + end)
| {
"content_hash": "4f329257a765a370116731012483b821",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 29.714285714285715,
"alnum_prop": 0.5769230769230769,
"repo_name": "dreadrel/UWF_2014_spring_COP3990C-2507",
"id": "1ffad8380ff2f1d3e249cd2862e4f440dedbd175",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/scripts/book_code/code/print3_alt2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1796"
},
{
"name": "Python",
"bytes": "493591"
}
],
"symlink_target": ""
} |
from contextlib import contextmanager
import os
import requests
from astropy import coordinates
import pytest
from ...utils import commons
from ...utils.testing_tools import MockResponse
from ... import alfalfa
DATA_FILES = {'catalog': 'alfalfa_cat_small.txt',
'spectrum': 'alfalfa_sp.fits'}
class MockResponseAlfalfa(MockResponse):
    """Canned HTTP response used to stub out ``requests.get`` in tests."""
    def __init__(self, content, **kwargs):
        super(MockResponseAlfalfa, self).__init__(content, **kwargs)
    def iter_lines(self):
        # Mimic requests' streaming interface by yielding one line at a time.
        for line in self.text.split("\n"):
            yield line
    def close(self):
        # Nothing to release; present only to satisfy the response API.
        pass
@pytest.fixture
def patch_get(request):
    """Monkeypatch ``requests.get`` to return the canned catalog response."""
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(requests, 'get', get_mockreturn)
    return mp
@pytest.fixture
def patch_get_readable_fileobj(request):
    """Monkeypatch ``commons.get_readable_fileobj`` to serve a local FITS file."""
    @contextmanager
    def get_readable_fileobj_mockreturn(filename, **kwargs):
        # Always serve the canned spectrum, whatever filename was requested.
        file_obj = data_path(DATA_FILES['spectrum'])  # TODO: add images option
        yield open(file_obj, 'rb')  # read as bytes, assuming FITS
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(commons, 'get_readable_fileobj',
               get_readable_fileobj_mockreturn)
    return mp
def get_mockreturn(url, params=None, timeout=10):
    """Return the canned catalog response regardless of *url*.

    :param url: ignored; present to match the ``requests.get`` signature
    :param params: ignored
    :param timeout: ignored
    :rtype: MockResponseAlfalfa
    """
    filename = data_path(DATA_FILES['catalog'])
    # Use a context manager so the file handle is closed promptly
    # (the original left the handle open until garbage collection).
    with open(filename, 'rb') as f:
        content = f.read()
    return MockResponseAlfalfa(content)
def data_path(filename):
    """Return the absolute path to *filename* inside the local ``data`` dir."""
    # os.path.join is variadic, so the directory and file join in one call.
    return os.path.join(os.path.dirname(__file__), 'data', filename)
# Test Case: A Seyfert 1 galaxy
coords = coordinates.SkyCoord('0h8m05.63s +14d50m23.3s')
# Module-level Alfalfa query instance shared by all tests below.
ALFALFA = alfalfa.core.Alfalfa()
def test_alfalfa_catalog(patch_get, patch_get_readable_fileobj, coords=coords):
    """The mocked catalog download should parse into a non-empty catalog."""
    cat = ALFALFA.get_catalog()
    assert len(cat) > 0
def test_alfalfa_crossID(patch_get, patch_get_readable_fileobj, coords=coords):
    """Cross-identification against the canned catalog yields AGC 100051."""
    agc = ALFALFA.query_region(coords, optical_counterpart=True)
    assert agc == 100051
def test_alfalfa_spectrum(patch_get, patch_get_readable_fileobj,
                          coords=coords):
    """Fetching the spectrum for the cross-matched AGC returns 3 HDUs."""
    agc = ALFALFA.query_region(coords, optical_counterpart=True)
    sp = ALFALFA.get_spectrum(agc)
    assert len(sp) == 3
| {
"content_hash": "bf9cf830b37db83a1fe4dd6fd95ca7f7",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 27.1875,
"alnum_prop": 0.6841379310344827,
"repo_name": "ceb8/astroquery",
"id": "81c8f5b112c0b22272cd6974bb47c8ec2c39cedc",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/track_master",
"path": "astroquery/alfalfa/tests/test_alfalfa.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "756486"
},
{
"name": "Python",
"bytes": "2760787"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import pytest
from changelog.util import default_get, get_change_text
class TestUtil():
    """Unit tests for the changelog.util helpers."""
    def test_default_get(self):
        """default_get inserts a freshly constructed value on first access."""
        d = {}
        x = default_get(d, "x", dict)
        assert "x" in d
        assert d["x"] is x
        # Nested use: the inner call returns the dict created above,
        # and the outer call adds a list under key "a" inside it.
        a = default_get(default_get(d, "x", dict),
                        "a", list)
        assert "x" in d
        assert d["x"] is x
        assert "a" in d["x"]
        assert d["x"]["a"] is a
        assert len(a) == 0
    def test_change_text_none(self):
        """get_change_text raises when given no message at all."""
        with pytest.raises(Exception):
            get_change_text(None)
    def test_change_text_nothing(self):
        """A message without a changelog marker yields None or the default."""
        assert get_change_text(
            "Blabla Blabla\nBlabla Blachangelog bla") is None
        defval = "default"
        assert get_change_text("Blabla Blabla\nBlabla Blachangelog bla",
                               defval) == defval
    def test_change_text_long(self):
        """The 'changelog:' marker is detected and leading spaces stripped."""
        s = """
        Blabla bla bla
        blab la bla
        bla bla
        changelog: Long text with leading spaces
        bla
        bla blal
        bla
        """
        assert (get_change_text(s, "default") ==
                "Long text with leading spaces")
    def test_change_text_short(self):
        """The short 'cl:' marker behaves the same as 'changelog:'."""
        s = """
        Blabla bla bla
        blab la bla
        bla bla
        cl: Short prefix with leading spaces
        bla
        bla blal
        bla
        """
        assert (get_change_text(s, "default") ==
                "Short prefix with leading spaces")
| {
"content_hash": "10b8b339af7014602849ba335e158787",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 72,
"avg_line_length": 24.629032258064516,
"alnum_prop": 0.5180091683038638,
"repo_name": "spacecowboy/changelog-writer",
"id": "70d1ac9e5519a3fc3708972f4ab7a769497f91f8",
"size": "1552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24189"
}
],
"symlink_target": ""
} |
"""This module implements the USB_Port class which allows the pyboard to
implement bioloid devices using the pyboard's USB Serial.
"""
from pyb import USB_VCP
class USB_Port:
    """Bioloid device port backed by the pyboard's USB virtual COM port.

    Lets the pyboard act as a bioloid device, receiving commands from a
    host over USB serial.
    """

    def __init__(self):
        """Open the USB VCP and disable Control-C interrupt handling."""
        self.usb_serial = USB_VCP()
        self.baud = 0
        self.rx_buf_len = 0
        self.recv_buf = bytearray(1)
        # A Control-C byte may legitimately appear inside packet data,
        # so make sure it is not interpreted as a keyboard interrupt.
        self.usb_serial.setinterrupt(-1)

    def any(self):
        """Return a truthy value when received bytes are waiting."""
        return self.usb_serial.any()

    def read_byte(self):
        """Return the next received byte, or None on a short timeout.

        The max Return Delay time is 254 x 2 usec = 508 usec (default
        500 usec); this is the minimum time between receiving a packet
        and sending a response.
        """
        count = self.usb_serial.recv(self.recv_buf, timeout=2)
        return self.recv_buf[0] if count > 0 else None

    def set_parameters(self, baud, rx_buf_len):
        """Record baud rate and receive buffer length.

        For USB serial these values have no effect, so this is
        essentially a no-op beyond bookkeeping.
        """
        self.baud = baud
        self.rx_buf_len = rx_buf_len

    def write_packet(self, packet_data):
        """Write an entire packet to the USB serial port."""
        self.usb_serial.write(packet_data)
| {
"content_hash": "8f3574117715bcbb2814326b35c8b2c0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 31.607843137254903,
"alnum_prop": 0.619727047146402,
"repo_name": "dhylands/bioloid3",
"id": "6d3a680d12f5c0981ed446f4846fd3ee15f6c1fd",
"size": "1612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bioloid/stm_usb_port.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80973"
}
],
"symlink_target": ""
} |
import numpy as np
import GPpref
from scipy.stats import beta
import plot_tools as ptt
def calc_ucb(fhat, vhat, gamma=2.0, sigma_offset=0.0):
    """Upper confidence bound: mean plus gamma-weighted (offset) std dev.

    *vhat* is a covariance matrix; only its diagonal (variances) is used.
    """
    std_dev = np.sqrt(np.atleast_2d(vhat.diagonal()).T)
    return fhat + gamma * (std_dev - sigma_offset)
def softmax_selector(x, tau=1.0):
    """Sample an index of *x* with probability softmax(x / tau).

    Subtracting the max before exponentiating keeps the exp numerically
    stable; it does not change the resulting distribution.
    """
    weights = np.exp((x - x.max()) / tau)
    probs = weights / weights.sum()
    return np.random.choice(len(x), p=probs)
class ActiveLearner(GPpref.PreferenceGaussianProcess):
    """Base class for active query selection over a preference GP.

    Subclasses override select_observation() with their own acquisition
    strategy; this base class picks points uniformly at random.
    """
    def init_extras(self):
        # Canonical (u, v) index pair and constant +/-1 relative labels,
        # reused whenever a single pairwise observation is added.
        self._default_uvi = np.array([[0, 1]])
        self._plus_y_obs = np.ones((1, 1), dtype='int')
        self._minus_y_obs = -1*self._plus_y_obs
    def set_hyperparameters(self, log_hyp):
        """Store log hyperparameters used by the Laplace solver."""
        self.log_hyp = log_hyp
    def solve_laplace(self, log_hyp=None):
        """Run the Laplace approximation (using stored hyp if none given)."""
        if log_hyp is None:
            log_hyp = self.log_hyp
        self.f = self.calc_laplace(log_hyp)
        return self.f
    def get_observations(self):
        """Return (x_rel, uvi_rel, x_abs, y_rel, y_abs)."""
        return self.x_rel, self.uvi_rel, self.x_abs, self.y_rel, self.y_abs
    def select_observation(self, p_rel=0.5, domain=None, n_rel_samples=2):
        """Randomly choose a relative (n_rel_samples points) or absolute query."""
        if np.random.uniform() > p_rel: # i.e choose an absolute sample
            n_rel_samples = 1
        return self.uniform_domain_sampler(n_rel_samples, domain)
    def uniform_domain_sampler(self, n_samples, domain=None):
        """Draw n_samples points uniformly from the (optional) domain box."""
        # Domain should be 2 x n_xdim, i.e [[x0_lo, x1_lo, ... , xn_lo], [x0_hi, x1_hi, ... , xn_hi ]]
        x_test = np.random.uniform(size=(n_samples, self._xdim))
        if domain is not None:
            # Rescale unit-cube samples into the requested box.
            x_test = x_test*np.diff(domain, axis=0) + domain[0, :]
        return x_test
    def create_posterior_plot(self, x_test, f_true, mu_true, rel_sigma, fuv_train, abs_y_samples, mc_samples):
        """Plot latent/absolute/relative posterior estimates over x_test."""
        # Latent predictions
        fhat, vhat = self.predict_latent(x_test)
        # Expected values
        E_y = self.abs_posterior_mean(x_test, fhat, vhat)
        # Absolute posterior likelihood (MC sampled)
        # Posterior likelihoods (MC sampled for absolute)
        p_abs_y_post = self.abs_posterior_likelihood(abs_y_samples, fhat=fhat, varhat=vhat, normal_samples=mc_samples)
        p_rel_y_post = self.rel_posterior_likelihood_array(fhat=fhat, varhat=vhat)
        x_train, uvi_train, x_abs_train, y_train, y_abs_train = self.get_observations()
        uv_train = x_train[uvi_train][:, :, 0]
        # Posterior estimates
        fig_p, (ax_p_l, ax_p_a, ax_p_r) = \
            ptt.estimate_plots(x_test, f_true, mu_true, fhat, vhat, E_y, rel_sigma,
                               abs_y_samples, p_abs_y_post, p_rel_y_post,
                               x_abs_train, y_abs_train, uv_train, fuv_train, y_train,
                               t_a=r'Posterior absolute likelihood, $p(y | \mathcal{Y}, \theta)$',
                               t_r=r'Posterior relative likelihood $P(x_0 \succ x_1 | \mathcal{Y}, \theta)$')
        return fig_p, (ax_p_l, ax_p_a, ax_p_r)
class UCBLatent(ActiveLearner):
    """Pick the single point maximising the latent-function UCB.

    Always issues absolute queries.
    """

    def select_observation(self, domain=None, n_test=100, gamma=2.0):
        candidates = self.uniform_domain_sampler(n_test, domain)
        mean, cov = self.predict_latent(candidates)
        scores = calc_ucb(mean, cov, gamma)
        return candidates[[np.argmax(scores)], :]
class UCBOut(ActiveLearner):
    """Pick the point with the highest expected output value."""

    def select_observation(self, domain=None, n_test=100, gamma=2.0):
        # NOTE(review): the second moment of the predictive output is not
        # recoverable here, so only the expected value is used (gamma unused).
        candidates = self.uniform_domain_sampler(n_test, domain)
        mean, cov = self.predict_latent(candidates)
        expected = self.expected_y(candidates, mean, cov)
        return candidates[[np.argmax(expected)], :]
class ABSThresh(ActiveLearner):
    """Absolute sampler targeting uncertainty around a rating threshold."""

    def select_observation(self, domain=None, n_test=100, p_thresh=0.7):
        candidates = self.uniform_domain_sampler(n_test, domain)
        mean, cov = self.predict_latent(candidates)
        alpha, bb = self.abs_likelihood.get_alpha_beta(mean)
        p_below = beta.cdf(p_thresh, alpha, bb)
        # Bernoulli variance p(1-p) peaks where we are most unsure whether
        # the rating falls below the threshold.
        return candidates[[np.argmax(p_below * (1.0 - p_below))], :]
class UCBAbsRel(ActiveLearner):
    """Mixed absolute/relative query selection via a softmax over UCB."""

    def select_observation(self, domain=None, n_test=100, p_rel=0.5, n_rel_samples=2, gamma=2.0, tau=5.0):
        x_test = self.uniform_domain_sampler(n_test, domain)
        fhat, vhat = self.predict_latent(x_test)
        ucb = calc_ucb(fhat, vhat, gamma).flatten()
        if np.random.uniform() < p_rel:
            # Relative query: seed with a softmax draw over the UCB scores,
            # then keep drawing (with the last pick zeroed out and a wider
            # temperature) until enough comparison points are collected.
            selection = [softmax_selector(ucb, tau=tau)]
            # NOTE(review): computed but currently unused; kept for parity.
            sq_dist = GPpref.squared_distance(x_test, x_test)
            while len(selection) < n_rel_samples:
                ucb[selection[-1]] = 0.0
                selection.append(softmax_selector(ucb, tau=tau*5.0))
        else:
            # Absolute query: greedy pick of the single best UCB point.
            selection = [np.argmax(ucb)]
        return x_test[selection, :]
class PeakComparitor(ActiveLearner):
    """Chooses comparison points by simulating their effect on the max UCB."""
    def test_observation(self, x, y, x_test, gamma):
        """Temporarily add observation (x, y), re-solve, return the max UCB."""
        self.store_observations()
        self.add_observations(x, y, self._default_uvi)
        f = self.solve_laplace()
        fhat, vhat = self.predict_latent(x_test)
        ucb = calc_ucb(fhat, vhat, gamma)
        self.reset_observations()
        return ucb.max()
    def store_observations(self):
        """Snapshot current observations so they can be restored later."""
        self.crx, self.cuv, self.cax, self.cry, self.cay = self.get_observations()
    def reset_observations(self):
        """Restore the observations saved by store_observations()."""
        try:
            self.set_observations(self.crx, self.cuv, self.cax, self.cry, self.cay)
        except AttributeError:
            # No snapshot exists yet (Python 2 print statement).
            print "reset_observations failed: existing observations not found"
    def select_observation(self, domain=None, n_test=50, gamma=2.0, n_rel_samples=2):
        """Pick either the best UCB point or the most informative comparison set.

        Pairs the current UCB maximiser against every other candidate,
        computing the expected post-observation max UCB for each pairing.
        """
        n_comparators = n_rel_samples-1
        x_test = self.uniform_domain_sampler(n_test, domain)
        fhat, vhat = self.predict_latent(x_test)
        ucb = calc_ucb(fhat, vhat, gamma)
        max_xi = np.argmax(ucb)  # Old method used highest x, not ucb
        other_xi = np.delete(np.arange(n_test), max_xi)
        uvi = np.vstack((max_xi * np.ones(n_test - 1, dtype='int'), other_xi)).T
        p_pref = self.rel_likelihood.posterior_likelihood(fhat, vhat, uvi, y=-1)
        V = np.zeros(n_test - 1)
        x = np.zeros((2, 1), dtype='float')
        x[0] = x_test[max_xi]
        # Now calculate the expected value for each observation pair
        for i,uvi1 in enumerate(other_xi):
            x[1] = x_test[uvi1]
            V[i] += p_pref[i]*self.test_observation(x, self._minus_y_obs, x_test, gamma)
            # Skip the unlikely branch when its probability is negligible.
            if (1 - p_pref[i]) > 1e-3:
                V[i] += (1-p_pref[i])*self.test_observation(x, self._plus_y_obs, x_test, gamma)
        best_n = np.argpartition(V, -n_comparators)[-n_comparators:]
        # best = np.argmax(V)
        cVmax = np.argmax(ucb)  # This is repeated in case I want to change max_xi
        if ucb[cVmax] > V.max():
            # A single absolute observation beats every comparison pairing.
            return x_test[[cVmax], :]
        else:
            xi = np.zeros(n_comparators+1, dtype='int')
            xi[0] = max_xi
            xi[1:] = other_xi[best_n]
            return x_test[xi, :]
class LikelihoodImprovement(PeakComparitor):
    """Comparison selection driven by the chance of finding a better peak."""
    def test_observation(self, x, y, x_test, max_xi):
        """Temporarily add (x, y); return P(new argmax beats current max)."""
        self.store_observations()
        self.add_observations(x, y, self._default_uvi)
        f = self.solve_laplace()
        fhat, vhat = self.predict_latent(x_test)
        new_xi = np.argmax(fhat)
        p_new_is_better = self.rel_likelihood.posterior_likelihood(fhat, vhat, np.array([[max_xi, new_xi]]), self._plus_y_obs)
        self.reset_observations()
        return p_new_is_better
    def select_observation(self, domain=None, n_test=50, req_improvement=0.6, n_rel_samples=2, gamma=1.5, p_thresh=0.7):
        """Pick comparison points expected to shift the posterior maximum.

        Falls back to a UCB absolute query when no pairing offers at least
        *req_improvement* probability of improving on the current best.
        """
        n_comparators = n_rel_samples-1
        x_test = self.uniform_domain_sampler(n_test, domain)
        fhat, vhat = self.predict_latent(x_test)
        max_xi = np.argmax(fhat)
        other_xi = np.delete(np.arange(n_test), max_xi)
        uvi = np.vstack((max_xi * np.ones(n_test - 1, dtype='int'), other_xi)).T
        p_pref = self.rel_likelihood.posterior_likelihood(fhat, vhat, uvi, y=-1)
        V = np.zeros(n_test - 1)
        x = np.zeros((2, 1), dtype='float')
        x[0] = x_test[max_xi]
        # Now calculate the expected value for each observation pair
        for i,uvi1 in enumerate(other_xi):
            x[1] = x_test[uvi1]
            V[i] += p_pref[i]*self.test_observation(x, self._minus_y_obs, x_test, max_xi)
            # Skip the unlikely branch when its probability is negligible.
            if (1-p_pref[i]) > 1e-3:
                V[i] += (1-p_pref[i])*self.test_observation(x, self._plus_y_obs, x_test, max_xi)
        Vmax = V.max()
        # best_n = np.argpartition(V, -n_comparators)[-n_comparators:]
        # best = np.argmax(V)
        print 'V_max = {0}'.format(Vmax)
        if Vmax < req_improvement:
            # No pairing is promising enough; fall back to a UCB query.
            # aa, bb = self.abs_likelihood.get_alpha_beta(fhat)
            # p_under_thresh = beta.cdf(p_thresh, aa, bb)
            # return x_test[[np.argmax(p_under_thresh*(1.0-p_under_thresh))], :]
            ucb = calc_ucb(fhat, vhat, gamma, self.rel_likelihood.sigma)
            return x_test[[np.argmax(ucb)], :]
        else:
            # Greedily pick comparators, discounting V by distance to the
            # last pick so the chosen points are spread apart.
            best_n = []
            while len(best_n) < n_comparators:
                cbest = np.argmax(V)
                best_n.append(cbest)
                V = V * np.sqrt(GPpref.squared_distance(x_test[[other_xi[cbest]], :], x_test[other_xi])[0])
            xi = np.zeros(n_comparators+1, dtype='int')
            xi[0] = max_xi
            xi[1:] = other_xi[best_n]
            return x_test[xi, :]
| {
"content_hash": "6e1e26e2eeb1a3f7c8e47cdbcb8b1a59",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 126,
"avg_line_length": 44.05855855855856,
"alnum_prop": 0.5848072794192822,
"repo_name": "nrjl/GPN",
"id": "0be43b64adc83b327fc804afe50cf2792af0a08e",
"size": "9781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "active_learners.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77650"
}
],
"symlink_target": ""
} |
"""
Rest API for Home Assistant.
For more details about the RESTful API, please refer to the documentation at
https://home-assistant.io/developers/api/
"""
import asyncio
import json
import logging
from aiohttp import web
import async_timeout
import homeassistant.core as ha
import homeassistant.remote as rem
from homeassistant.bootstrap import ERROR_LOG_FILENAME
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, EVENT_TIME_CHANGED,
HTTP_BAD_REQUEST, HTTP_CREATED, HTTP_NOT_FOUND,
MATCH_ALL, URL_API, URL_API_COMPONENTS,
URL_API_CONFIG, URL_API_DISCOVERY_INFO, URL_API_ERROR_LOG,
URL_API_EVENTS, URL_API_SERVICES,
URL_API_STATES, URL_API_STATES_ENTITY, URL_API_STREAM, URL_API_TEMPLATE,
__version__)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.state import AsyncTrackStates
from homeassistant.helpers import template
from homeassistant.components.http import HomeAssistantView
DOMAIN = 'api'
DEPENDENCIES = ['http']
STREAM_PING_PAYLOAD = "ping"
STREAM_PING_INTERVAL = 50 # seconds
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
    """Register the API with the HTTP interface."""
    # Register every API view in a single pass.
    for view in (APIStatusView, APIEventStream, APIConfigView,
                 APIDiscoveryView, APIStatesView, APIEntityStateView,
                 APIEventListenersView, APIEventView, APIServicesView,
                 APIDomainServicesView, APIComponentsView, APITemplateView):
        hass.http.register_view(view)

    # Serve the error log as a static (non-cached) resource.
    hass.http.register_static_path(
        URL_API_ERROR_LOG, hass.config.path(ERROR_LOG_FILENAME), False)

    return True
class APIStatusView(HomeAssistantView):
    """View to handle Status requests."""
    url = URL_API
    name = "api:status"
    @ha.callback
    def get(self, request):
        """Retrieve if API is running."""
        # A 200 with this message is the liveness signal clients poll for.
        return self.json_message('API running.')
class APIEventStream(HomeAssistantView):
    """View to handle EventStream requests."""
    url = URL_API_STREAM
    name = "api:stream"
    @asyncio.coroutine
    def get(self, request):
        """Provide a streaming interface for the event bus."""
        # pylint: disable=no-self-use
        hass = request.app['hass']
        # Unique sentinel: queued in place of data to signal shutdown.
        stop_obj = object()
        to_write = asyncio.Queue(loop=hass.loop)
        # Optional comma-separated list of event types to forward.
        restrict = request.query.get('restrict')
        if restrict:
            # Always forward the stop event so the stream can close cleanly.
            restrict = restrict.split(',') + [EVENT_HOMEASSISTANT_STOP]
        @asyncio.coroutine
        def forward_events(event):
            """Forward events to the open request."""
            if event.event_type == EVENT_TIME_CHANGED:
                return
            if restrict and event.event_type not in restrict:
                return
            _LOGGER.debug('STREAM %s FORWARDING %s', id(stop_obj), event)
            if event.event_type == EVENT_HOMEASSISTANT_STOP:
                data = stop_obj
            else:
                data = json.dumps(event, cls=rem.JSONEncoder)
            yield from to_write.put(data)
        response = web.StreamResponse()
        # Server-sent events content type.
        response.content_type = 'text/event-stream'
        yield from response.prepare(request)
        unsub_stream = hass.bus.async_listen(MATCH_ALL, forward_events)
        try:
            _LOGGER.debug('STREAM %s ATTACHED', id(stop_obj))
            # Fire off one message so browsers fire open event right away
            yield from to_write.put(STREAM_PING_PAYLOAD)
            while True:
                try:
                    # Timing out here doubles as the keep-alive mechanism:
                    # on timeout a ping payload is queued instead of data.
                    with async_timeout.timeout(STREAM_PING_INTERVAL,
                                               loop=hass.loop):
                        payload = yield from to_write.get()
                    if payload is stop_obj:
                        break
                    # SSE framing: "data: <payload>" followed by blank line.
                    msg = "data: {}\n\n".format(payload)
                    _LOGGER.debug('STREAM %s WRITING %s', id(stop_obj),
                                  msg.strip())
                    response.write(msg.encode("UTF-8"))
                    yield from response.drain()
                except asyncio.TimeoutError:
                    yield from to_write.put(STREAM_PING_PAYLOAD)
        except asyncio.CancelledError:
            # Client disconnected.
            _LOGGER.debug('STREAM %s ABORT', id(stop_obj))
        finally:
            # Always detach the bus listener, whatever ended the stream.
            _LOGGER.debug('STREAM %s RESPONSE CLOSED', id(stop_obj))
            unsub_stream()
class APIConfigView(HomeAssistantView):
    """View to handle Config requests."""
    url = URL_API_CONFIG
    name = "api:config"
    @ha.callback
    def get(self, request):
        """Get current configuration."""
        # Serialize the core configuration via Config.as_dict().
        return self.json(request.app['hass'].config.as_dict())
class APIDiscoveryView(HomeAssistantView):
    """View to provide discovery info."""

    # Discovery info must be reachable without credentials.
    requires_auth = False
    url = URL_API_DISCOVERY_INFO
    name = "api:discovery"

    @ha.callback
    def get(self, request):
        """Return basic instance information for discovery clients."""
        hass = request.app['hass']
        return self.json({
            'base_url': hass.config.api.base_url,
            'location_name': hass.config.location_name,
            # Clients must supply a password whenever one is configured.
            'requires_api_password': hass.config.api.api_password is not None,
            'version': __version__,
        })
class APIStatesView(HomeAssistantView):
    """View to handle States requests."""
    url = URL_API_STATES
    name = "api:states"
    @ha.callback
    def get(self, request):
        """Get current states."""
        # Snapshot of every entity's state in the state machine.
        return self.json(request.app['hass'].states.async_all())
class APIEntityStateView(HomeAssistantView):
    """View to handle EntityState requests."""

    url = "/api/states/{entity_id}"
    name = "api:entity-state"

    @ha.callback
    def get(self, request, entity_id):
        """Retrieve the current state of an entity."""
        state = request.app['hass'].states.get(entity_id)
        if not state:
            return self.json_message('Entity not found', HTTP_NOT_FOUND)
        return self.json(state)

    @asyncio.coroutine
    def post(self, request, entity_id):
        """Create or update the state of an entity."""
        hass = request.app['hass']
        try:
            payload = yield from request.json()
        except ValueError:
            return self.json_message('Invalid JSON specified',
                                     HTTP_BAD_REQUEST)

        new_state = payload.get('state')
        if new_state is None:
            return self.json_message('No state specified', HTTP_BAD_REQUEST)

        attributes = payload.get('attributes')
        force_update = payload.get('force_update', False)
        # Whether this request creates the entity (affects status code).
        is_new_state = hass.states.get(entity_id) is None

        # Write state
        hass.states.async_set(entity_id, new_state, attributes, force_update)

        # Read the state back for our response
        status_code = HTTP_CREATED if is_new_state else 200
        resp = self.json(hass.states.get(entity_id), status_code)
        resp.headers.add('Location', URL_API_STATES_ENTITY.format(entity_id))
        return resp

    @ha.callback
    def delete(self, request, entity_id):
        """Remove an entity's state."""
        removed = request.app['hass'].states.async_remove(entity_id)
        if removed:
            return self.json_message('Entity removed')
        return self.json_message('Entity not found', HTTP_NOT_FOUND)
class APIEventListenersView(HomeAssistantView):
    """View to handle EventListeners requests."""
    url = URL_API_EVENTS
    name = "api:event-listeners"
    @ha.callback
    def get(self, request):
        """Get event listeners."""
        # Delegates to the module-level helper that snapshots bus listeners.
        return self.json(async_events_json(request.app['hass']))
class APIEventView(HomeAssistantView):
    """View to handle Event requests."""

    url = '/api/events/{event_type}'
    name = "api:event"

    @asyncio.coroutine
    def post(self, request, event_type):
        """Fire an event on the bus, optionally with a JSON data payload."""
        raw_body = yield from request.text()
        payload = json.loads(raw_body) if raw_body else None

        if payload is not None and not isinstance(payload, dict):
            return self.json_message('Event data should be a JSON object',
                                     HTTP_BAD_REQUEST)

        # Special case for STATE_CHANGED events: rebuild State objects
        # from their dict form so listeners see real State instances.
        if event_type == ha.EVENT_STATE_CHANGED and payload:
            for key in ('old_state', 'new_state'):
                rebuilt = ha.State.from_dict(payload.get(key))
                if rebuilt:
                    payload[key] = rebuilt

        request.app['hass'].bus.async_fire(
            event_type, payload, ha.EventOrigin.remote)
        return self.json_message("Event {} fired.".format(event_type))
class APIServicesView(HomeAssistantView):
    """View to handle Services requests."""
    url = URL_API_SERVICES
    name = "api:services"
    @ha.callback
    def get(self, request):
        """Get registered services."""
        # Delegates to the module-level helper that snapshots services.
        return self.json(async_services_json(request.app['hass']))
class APIDomainServicesView(HomeAssistantView):
    """View to handle DomainServices requests."""

    url = "/api/services/{domain}/{service}"
    name = "api:domain-services"

    @asyncio.coroutine
    def post(self, request, domain, service):
        """Call a service and return the list of states it changed."""
        hass = request.app['hass']
        raw_body = yield from request.text()
        service_data = json.loads(raw_body) if raw_body else None

        # Track which states change while the (blocking) call runs.
        with AsyncTrackStates(hass) as changed_states:
            yield from hass.services.async_call(
                domain, service, service_data, True)

        return self.json(changed_states)
class APIComponentsView(HomeAssistantView):
    """View to handle Components requests."""
    url = URL_API_COMPONENTS
    name = "api:components"
    @ha.callback
    def get(self, request):
        """Get current loaded components."""
        return self.json(request.app['hass'].config.components)
class APITemplateView(HomeAssistantView):
    """View to render a template with optional variables."""

    url = URL_API_TEMPLATE
    name = "api:template"

    @asyncio.coroutine
    def post(self, request):
        """Render a template.

        Expects a JSON body like ``{"template": "...", "variables": {...}}``
        (``variables`` is optional) and returns the rendered text.
        """
        try:
            data = yield from request.json()
            tpl = template.Template(data['template'], request.app['hass'])
            return tpl.async_render(data.get('variables'))
        except (ValueError, KeyError, TemplateError) as ex:
            # ValueError: malformed JSON body; KeyError: missing 'template'
            # key (previously leaked as an unhandled 500); TemplateError:
            # render failure. All are client errors -> 400.
            return self.json_message('Error rendering template: {}'.format(ex),
                                     HTTP_BAD_REQUEST)
def async_services_json(hass):
    """Generate services data to JSONify."""
    registry = hass.services.async_services()
    return [{"domain": domain, "services": services}
            for domain, services in registry.items()]
def async_events_json(hass):
    """Generate event data to JSONify."""
    listeners = hass.bus.async_listeners()
    return [{"event": event_type, "listener_count": count}
            for event_type, count in listeners.items()]
| {
"content_hash": "885f381f0ed5cfe2d5b38e984316aba6",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 79,
"avg_line_length": 30.894444444444446,
"alnum_prop": 0.6164358928250314,
"repo_name": "alexmogavero/home-assistant",
"id": "8205029bd21a722a5e3ece6f1f64ee71e7984ce7",
"size": "11122",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1905204"
},
{
"name": "Python",
"bytes": "6749372"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "14930"
}
],
"symlink_target": ""
} |
"""Module loader file for /ciscripts/deploy/conan."""
| {
"content_hash": "cabc41608d4db2467cf88086b0f749c6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 53,
"avg_line_length": 54,
"alnum_prop": 0.7222222222222222,
"repo_name": "polysquare/polysquare-ci-scripts",
"id": "a0df194b4f94e3f8e1105562daa666969de4cfe9",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ciscripts/deploy/conan/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "322057"
},
{
"name": "Shell",
"bytes": "255"
}
],
"symlink_target": ""
} |
from pnc_cli import makemead
import random
import string
def test_sso():
    """Drive a full MEAD build of product sso 7.1 using configs/sso.cfg."""
    makemead.make_mead(config="configs/sso.cfg", product_name="sso", product_version="7.1", run_build=True, suffix=get_suffix())
def get_suffix():
    """Return a random suffix: a dash followed by 10 uppercase/digit chars."""
    alphabet = string.ascii_uppercase + string.digits
    return "-" + ''.join(random.choice(alphabet) for _ in range(10))
| {
"content_hash": "64032a0495c7f103588465340a8f944d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 128,
"avg_line_length": 34.6,
"alnum_prop": 0.6589595375722543,
"repo_name": "project-ncl/pnc-cli",
"id": "c88bf7b2fca1aae757af85304c6015e7e0779a26",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/sso/test_sso.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "70367"
},
{
"name": "Python",
"bytes": "1865278"
},
{
"name": "Shell",
"bytes": "2479"
}
],
"symlink_target": ""
} |
from objects import PadStorage
__author__ = 'Taras Drapalyuk <taras@drapalyuk.com>'
__date__ = '24.05.2015'
import uuid
from datetime import datetime, timedelta
import tornado.web
import tornado.ioloop
from gorinich import logger
from gorinich.utils import get_server_address
class Application(tornado.web.Application):
    """Tornado application holding the engine, connections and pad storage.

    The engine and the periodic pad cleanup are wired up in
    :meth:`initialize`. Ping broadcasting and session cleanup are
    currently disabled (their dead, commented-out wiring has been
    removed; the helper methods for ping remain available).
    """

    # How often old sessions (tokens) would be cleaned (seconds).
    CLEAN_SESSION_INTERVAL = 60 * 60
    # Age after which an activated session counts as old (seconds).
    OLD_SESSION_LIFETIME = 60 * 60
    # How often expired pads are removed (seconds).
    CLEAN_PADS_INTERVAL = 60 * 60
    # How often a ping would be broadcast (seconds).
    PING_INTERVAL = 60

    def __init__(self, *args, **kwargs):
        """Initialize tornado's application and server-side state."""
        super(Application, self).__init__(*args, **kwargs)
        # Unique id for this application instance.
        self.uid = uuid.uuid4().hex
        # Administrator's (web-interface) connections.
        self.admin_connections = {}
        # Client connections.
        self.connections = {}
        # Application engine; wired up later via initialize().
        self.engine = None
        self.address = get_server_address()
        self.pad_storage = PadStorage(pad_lifetime=self.settings['pad_lifetime'])

    def initialize(self):
        """Start the engine and the periodic pad cleanup task."""
        self.init_engine()
        self.init_clean_pads()

    def init_engine(self):
        """Schedule engine initialization on the IOLoop."""
        tornado.ioloop.IOLoop.instance().add_callback(self.engine.initialize)

    def init_ping(self):
        """Start the periodic task that broadcasts pings to all clients."""
        self.send_ping_periodically = tornado.ioloop.PeriodicCallback(
            callback=self.send_ping,
            callback_time=self.PING_INTERVAL * 1000
        )
        self.send_ping_periodically.start()
        logger.info('Ping initialized')

    def update_ping_interval(self, ping_interval):
        """Restart the ping task with a new interval (in seconds)."""
        self.send_ping_periodically.stop()
        self.send_ping_periodically = tornado.ioloop.PeriodicCallback(
            callback=self.send_ping,
            callback_time=ping_interval * 1000
        )
        self.send_ping_periodically.start()

    def send_ping(self):
        """Send Ping! to all clients."""
        self.engine.publish_public_message('Ping!')

    def init_clean_pads(self):
        """Start the periodic task that removes expired pads."""
        tornado.ioloop.PeriodicCallback(
            callback=self.clean_pads,
            callback_time=self.CLEAN_PADS_INTERVAL * 1000
        ).start()
        logger.info('Clean pads initialized')

    def clean_pads(self):
        """Remove pads whose lifetime has expired."""
        logger.info('Starting clean pads')
        self.pad_storage.delete_expired_pads()
| {
"content_hash": "5eea23736b5cbeab17e2d9056beef9ad",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 95,
"avg_line_length": 31.29813664596273,
"alnum_prop": 0.567176026989482,
"repo_name": "kulapard/Gorinich",
"id": "6ed4488cb7da53bc9e3676e6e8b418a8dc993928",
"size": "5063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gorinich/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3433"
},
{
"name": "Python",
"bytes": "15048"
}
],
"symlink_target": ""
} |
# course: ECE 1160
# laboratory: 2
# date: 10/04/18
# username: zmm15
# name: Zachary M. Mattis
# title: Raspberry Pi SenseHat Joystick
# description: SenseHat G-Force Detection
#from sense_hat import SenseHat
import color
import time | {
"content_hash": "dc5e7844708c9ec5787289f57a04ca2e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 45,
"avg_line_length": 22.416666666666668,
"alnum_prop": 0.6617100371747212,
"repo_name": "zmattis/University-of-Pittsburgh",
"id": "909e9f79d8618e04a98cb7e2fa2e0ee60ce9078c",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ECE-1160/L2_3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "678183"
},
{
"name": "C",
"bytes": "1075345"
},
{
"name": "C++",
"bytes": "1204"
},
{
"name": "Java",
"bytes": "288661"
},
{
"name": "MATLAB",
"bytes": "46294"
},
{
"name": "Makefile",
"bytes": "1538"
},
{
"name": "PLpgSQL",
"bytes": "1494"
},
{
"name": "Python",
"bytes": "8021"
},
{
"name": "SQLPL",
"bytes": "1213"
},
{
"name": "Shell",
"bytes": "17236"
},
{
"name": "TeX",
"bytes": "146569"
}
],
"symlink_target": ""
} |
"""
Tests for confab data model.
"""
from os.path import dirname, join
from nose.tools import eq_
from unittest import TestCase
from confab.data import import_configuration, DataLoader
from confab.definitions import Settings
class TestData(TestCase):
    """Exercise confab's configuration-data loading and merging."""

    def setUp(self):
        # Minimal topology: a single host in one environment, one role
        # that owns a single component.
        self.settings = Settings()
        self.settings.environmentdefs = {"environment": ["host"]}
        self.settings.roledefs = {"role": ["host"]}
        self.settings.componentdefs = {"role": ["component"]}
        self.component = self.settings.for_env("environment").components().next()

    def test_data_templates(self):
        """
        Data modules can be Jinja templates.

        Resolving the "bar.py_tmpl" template includes the foo.py module
        (defining "foo") and evaluates the "baz.py" macro (defining "baz").
        """
        loaded = import_configuration('bar', join(dirname(__file__), 'data/templates'))
        eq_(loaded, {'foo': 'foo', 'bar': 'bar', 'baz': {'n': 42}})

    def test_load_order(self):
        """
        Modules always load as: default, component, role, environment, host.
        """
        loader = DataLoader(join(dirname(__file__), 'data/order'))
        eq_(loader(self.component)['data'],
            {'default': 'default',
             'component': 'component',
             'role': 'role',
             'environment': 'environment',
             'host': 'host'})

    def test_custom_data_modules_load_order(self):
        """
        Supplying the data modules in a custom order does not change the
        effective load order.
        """
        loader = DataLoader(join(dirname(__file__), 'data/order'),
                            data_modules=reversed(DataLoader.ALL))
        eq_(loader(self.component)['data'],
            {'default': 'default',
             'component': 'component',
             'role': 'role',
             'environment': 'environment',
             'host': 'host'})

    def test_custom_data_modules_selection(self):
        """
        A custom data-module list may restrict which modules get loaded.
        """
        loader = DataLoader(join(dirname(__file__), 'data/order'),
                            data_modules=['component', 'host'])
        eq_(loader(self.component)['data'],
            {'role': 'component',
             'component': 'component',
             'environment': 'component',
             'host': 'host'})

    def test_nested_configuration_files(self):
        """
        Configuration data may live in a nested folder structure.
        """
        loader = DataLoader(join(dirname(__file__), 'data/nested'))
        eq_(loader(self.component)['data'],
            {'default': 'default',
             'component': 'component',
             'role': 'role',
             'environment': 'environment',
             'host': 'host'})

    def test_missing_data_module(self):
        """
        A data module that does not exist is silently ignored.
        """
        loader = DataLoader(join(dirname(__file__), 'data/missing'),
                            data_modules=['component'])
        # no module named component
        eq_(None, loader(self.component).get('data'))

    def test_broken_data_module(self):
        """
        A data module with a broken import raises ImportError.
        """
        loader = DataLoader(join(dirname(__file__), 'data/broken'),
                            data_modules=['component'])
        with self.assertRaises(ImportError):
            loader(self.component).get('data')

    def test_broken_data_template(self):
        """
        A data template with a broken import raises ImportError.
        """
        loader = DataLoader(join(dirname(__file__), 'data/broken'),
                            data_modules=['host'])
        with self.assertRaises(ImportError):
            loader(self.component).get('data')

    def test_data_callables(self):
        """
        Callable values are applied while merging data.
        """
        merged = DataLoader(join(dirname(__file__), 'data/callables'))(self.component)
        eq_(merged['appended'], ['default', 'environment'])
        eq_(merged['prepended'], ['environment', 'default'])
        eq_(merged['unique'], ['default'])
        eq_(merged['rotated'], ['pivot', 'itemB', 'itemA'])
| {
"content_hash": "7033153543d8605a36e4e0588f4abcf9",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 85,
"avg_line_length": 32.69285714285714,
"alnum_prop": 0.536596023596242,
"repo_name": "locationlabs/confab",
"id": "cd0554fdc357a2e4a91404bc392dc2aba10216d0",
"size": "4577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "confab/tests/test_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "119227"
}
],
"symlink_target": ""
} |
"""
CUSF Simulator Utilities
"""
| {
"content_hash": "30b2bbc86339ffde8f72033731e490c5",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 24,
"avg_line_length": 8.5,
"alnum_prop": 0.6470588235294118,
"repo_name": "cuspaceflight/firefish",
"id": "a8c8cd04a69ee3290601be04cd4c933d83d11d7e",
"size": "34",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "firefish/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GLSL",
"bytes": "823"
},
{
"name": "Makefile",
"bytes": "1092"
},
{
"name": "OpenSCAD",
"bytes": "1668"
},
{
"name": "Python",
"bytes": "86356"
},
{
"name": "Shell",
"bytes": "1374"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.logic import LogicManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-logic
# USAGE
python gets_the_integration_service_environment_network_health.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch and print an integration service environment's network health."""
    client = LogicManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="f34b22a3-2202-4fb1-b040-1332bd928c84",
    )
    health = client.integration_service_environment_network_health.get(
        resource_group="testResourceGroup",
        integration_service_environment_name="testIntegrationServiceEnvironment",
    )
    print(health)
# x-ms-original-file: specification/logic/resource-manager/Microsoft.Logic/stable/2019-05-01/examples/IntegrationServiceEnvironments_NetworkHealth.json
# Run the sample only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| {
"content_hash": "fd4f7c24f2c63f017cfd406829e2bce3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 151,
"avg_line_length": 36.78787878787879,
"alnum_prop": 0.7537067545304778,
"repo_name": "Azure/azure-sdk-for-python",
"id": "99bffc1e881d7d8dd64662eb4844fcb65806da0b",
"size": "1682",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/logic/azure-mgmt-logic/generated_samples/gets_the_integration_service_environment_network_health.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from unittesting import DeferrableTestCase
from GitSavvy.tests.mockito import when, verify
from GitSavvy.tests.parameterized import parameterized as p
from GitSavvy.core.git_mixins.history import HistoryMixin
# Each case: (description, base, target, expected tail of the `git diff`
# argument list).  A None endpoint stands for the working directory; a
# diff *from* the working directory is expressed with `-R` (reverse) on
# the remaining endpoint.
examples = [
    ("from commit to working dir", "abc", None, ["abc", None]),
    ("from commit to commit", "abc", "def", ["abc", "def"]),
    ("from commit to commit", "abc", "HEAD", ["abc", "HEAD"]),
    ("from working dir to commit", None, "def", ["-R", "def"]),
    ("from working dir to HEAD", None, "HEAD", ["-R", "HEAD"]),
]
class TestDescribeGraphLine(DeferrableTestCase):
    """Verify the git argument lists produced by ``no_context_diff``."""

    @p.expand(examples)
    def test_no_context_diff_logic(self, _, base, target, cmd):
        mixin = HistoryMixin()
        when(mixin, strict=False).git("diff", ...).thenReturn("irrelevant")
        mixin.no_context_diff(base, target)
        base_args = ["diff", "--no-color", "-U0"]
        verify(mixin).git(*(base_args + cmd))

    def test_no_context_diff_add_file_if_given(self):
        mixin = HistoryMixin()
        when(mixin, strict=False).git("diff", ...).thenReturn("irrelevant")
        mixin.no_context_diff("a", "b", "foofile.py")
        expected = ["diff", "--no-color", "-U0", "a", "b", "--", "foofile.py"]
        verify(mixin).git(*expected)
| {
"content_hash": "e4fa4738ee8e0f59760f539a9a64383b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 39.5625,
"alnum_prop": 0.6058451816745656,
"repo_name": "divmain/GitSavvy",
"id": "2fcf2390409a0d3c446f4d275b60a889b1146d28",
"size": "1266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_history_mixin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "118"
},
{
"name": "HTML",
"bytes": "13504"
},
{
"name": "Python",
"bytes": "646248"
},
{
"name": "Shell",
"bytes": "2011"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import sys
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
# noinspection PyProtectedMember
from build.__main__ import build_package
from build.util import project_wheel_metadata
from tests.testutils import subprocess_run
from tests.testutils import temporary_project_directory
if TYPE_CHECKING:
from _pytest.fixtures import FixtureRequest
# Mark every test in this module as an integration test.
pytestmark = pytest.mark.integration
@pytest.mark.parametrize(
    "getter, project",
    [
        ("common_project", "simple_project"),
        ("masonry_project", "src_extended"),
        ("masonry_project", "disable_setup_py"),
    ],
)
def test_pep517_check_poetry_managed(
    request: FixtureRequest, getter: str, project: str
) -> None:
    """PEP 517 metadata preparation works for poetry-managed fixtures."""
    fixture = request.getfixturevalue(getter)
    with temporary_project_directory(fixture(project)) as path:
        assert project_wheel_metadata(path)
def test_pep517_check(project_source_root: Path) -> None:
    """PEP 517 metadata preparation succeeds for poetry-core itself."""
    metadata = project_wheel_metadata(str(project_source_root))
    assert metadata
def test_pep517_build_sdist(
    temporary_directory: Path, project_source_root: Path
) -> None:
    """Building an sdist via PEP 517 produces exactly one archive."""
    build_package(
        srcdir=str(project_source_root),
        outdir=str(temporary_directory),
        distributions=["sdist"],
    )
    sdists = list(temporary_directory.glob("poetry_core-*.tar.gz"))
    assert len(sdists) == 1
def test_pep517_build_wheel(
    temporary_directory: Path, project_source_root: Path
) -> None:
    """Building a wheel via PEP 517 produces exactly one wheel file."""
    build_package(
        srcdir=str(project_source_root),
        outdir=str(temporary_directory),
        distributions=["wheel"],
    )
    wheels = list(temporary_directory.glob("poetry_core-*-none-any.whl"))
    assert len(wheels) == 1
def test_pip_wheel_build(temporary_directory: Path, project_source_root: Path) -> None:
    """`pip wheel` can build poetry-core with its PEP 517 backend."""
    out_dir = str(temporary_directory)
    result = subprocess_run(
        "pip", "wheel", "--use-pep517", "-w", out_dir, str(project_source_root)
    )
    assert "Successfully built poetry-core" in result.stdout
    assert result.returncode == 0
    built = list(Path(out_dir).glob("poetry_core-*-none-any.whl"))
    assert len(built) == 1
@pytest.mark.xfail(
    sys.version_info < (3, 8),
    # see https://github.com/python/importlib_metadata/issues/392
    reason="importlib-metadata can't be installed with --no-binary anymore",
    strict=True,
)
def test_pip_install_no_binary(python: str, project_source_root: Path) -> None:
    """poetry-core installs from source when binary wheels are disabled."""
    install_cmd = [
        python,
        "-m",
        "pip",
        "install",
        "--no-binary",
        ":all:",
        project_source_root.as_posix(),
    ]
    subprocess_run(*install_cmd)
    show = subprocess_run(python, "-m", "pip", "show", "poetry-core")
    assert "Name: poetry-core" in show.stdout
| {
"content_hash": "e9cecee31691b3dd40c1ceeca253b993",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 87,
"avg_line_length": 27.755102040816325,
"alnum_prop": 0.6713235294117647,
"repo_name": "python-poetry/poetry-core",
"id": "0706112ba8e03b88abf2a2e73f7ca4ec34d080e1",
"size": "2720",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/integration/test_pep517.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2664"
},
{
"name": "Makefile",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2084191"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse_lazy
from django.views.generic import TemplateView
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import logout
from apps.landing.models import Landing
from apps.base.views import (BaseView, LoginRequiredMixin)
class UserProfileView(BaseView, LoginRequiredMixin, TemplateView):
    """Render the profile page for the authenticated user."""

    template_name = 'user/profile.html'

    def get_context_data(self, **kwargs):
        """Extend the base context with this page's title info."""
        context = super().get_context_data(**kwargs)
        context.update({
            'info': {
                'title': 'User Profile - NMS',
            },
        })
        return context
"content_hash": "823cab23c84e6b72a73c9b872d40bcce",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 31.59090909090909,
"alnum_prop": 0.6805755395683454,
"repo_name": "pythonvietnam/nms",
"id": "dfe60db10b9571b50037df86ae1de6f79642e05e",
"size": "695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/user/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1185163"
},
{
"name": "HTML",
"bytes": "110643"
},
{
"name": "JavaScript",
"bytes": "4998231"
},
{
"name": "Python",
"bytes": "62913"
}
],
"symlink_target": ""
} |
""" lightCurve.py
This purpose of this program is to produce a light curve from data for time and brightness.
Language: Python 3
Tanner Leighton
Written for CIERA Summer Internship, Northwestern University.
6/22/17
"""
"""First let's import the libraries that will provide you with some useful functions.
We'll start by importing the matplotlib, numpy, and curve_fit libraries"""
#%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
xList = []
yList = []
errorList = []
fin = open("star1DataPrime.txt")
for line in fin:
#line = line.strip()
line = line.split() # Splits each line into constituent numbers.
xList.append(line[0])
yList.append(line[1])
errorList.append(line[2])
fin.close()
xListPrime = []
for element in xList:
xListPrime.append(float(element))
yListPrime = []
for element in yList:
yListPrime.append(float(element))
errorListPrime = []
for element in errorList:
errorListPrime.append(float(element))
plt.errorbar(xListPrime, yListPrime, yerr = errorListPrime, fmt='ro', markersize=3)
plt.title("Star 1 Light Curve")
plt.xlabel("time")
plt.ylabel("brightness")
plt.show()
xListSmallTime = []
xListLargeTime = []
yListSmallTime = []
yListLargeTime = []
errorListSmallTime = []
errorListLargeTime = []
length = len(xListPrime)
for i in range(length):
if xListPrime[i] < 55600:
xListSmallTime.append(xListPrime[i])
yListSmallTime.append(yListPrime[i])
errorListSmallTime.append(errorListPrime[i])
else:
xListLargeTime.append(xListPrime[i])
yListLargeTime.append(yListPrime[i])
errorListLargeTime.append(errorListPrime[i])
plt.errorbar(xListSmallTime, yListSmallTime, yerr = errorListSmallTime, fmt='ro', markersize=3)
plt.title("Star 1 Light Curve 1")
plt.xlabel("time")
plt.ylabel("brightness")
plt.show()
plt.errorbar(xListLargeTime, yListLargeTime, yerr = errorListLargeTime, fmt='ro', markersize=3)
plt.title("Star 1 Light Curve 2")
plt.xlabel("time")
plt.ylabel("brightness")
plt.show()
| {
"content_hash": "16478488c3598adeb46f95fb89a471bc",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 96,
"avg_line_length": 25.790123456790123,
"alnum_prop": 0.7185256103398755,
"repo_name": "krispmort/NUREU17",
"id": "0995015bf881dc5638f23302f0e100fd9486a808",
"size": "2089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LSST/VariableStarClassification/lightCurve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8395230"
},
{
"name": "Python",
"bytes": "2089"
}
],
"symlink_target": ""
} |
from hanzo.httptools.messaging import RequestMessage, ResponseMessage, HTTP09Response
__all__ = [
"RequestMessage",
"ResponseMessage",
"HTTP09Response",
]
| {
"content_hash": "21cddf7a170d391b037125839f90dbe2",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 85,
"avg_line_length": 21.125,
"alnum_prop": 0.7218934911242604,
"repo_name": "internetarchive/warctools",
"id": "85ced342cfc90b329ffde0f59e05d16c92842f5d",
"size": "169",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hanzo/httptools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133515"
},
{
"name": "Shell",
"bytes": "2221"
}
],
"symlink_target": ""
} |
from google.cloud import gaming_v1
def sample_fetch_deployment_state():
    """Call FetchDeploymentState and print the returned state."""
    # Build the client and request, then issue the RPC and show the result.
    client = gaming_v1.GameServerDeploymentsServiceClient()
    request = gaming_v1.FetchDeploymentStateRequest(name="name_value")
    response = client.fetch_deployment_state(request=request)
    print(response)
# [END gameservices_v1_generated_GameServerDeploymentsService_FetchDeploymentState_sync]
| {
"content_hash": "9a58a6a8e662effa3a71e4d6418efd18",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 88,
"avg_line_length": 26.473684210526315,
"alnum_prop": 0.73558648111332,
"repo_name": "googleapis/python-game-servers",
"id": "4738e23782e739180ed1039559740a52865999b9",
"size": "1927",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/gameservices_v1_generated_game_server_deployments_service_fetch_deployment_state_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2394740"
},
{
"name": "Shell",
"bytes": "30678"
}
],
"symlink_target": ""
} |
def agregar_una_vez(lista, elemento):
    """Append *elemento* to *lista* only if it is not already present.

    Duplicates are reported on stdout instead of raising.  The original
    implementation raised ValueError and caught it in the same function
    purely for control flow; a plain conditional produces the identical
    observable behavior without the exception round-trip.
    """
    if elemento not in lista:
        lista.append(elemento)
    else:
        # Message kept byte-for-byte (user-facing output).
        print('Error: Imposible añadir elementos duplicados => {}'.format(elemento))
# Demo: seed a list, then interactively try to add three values read
# from stdin.  Entering the suggested values exercises both the
# "added" path (10, "Hola" is already present) and the duplicate path.
lista_prueba = [1, 5, "Hola",-2]
elemento1 = int(input('Ingresa un 10'))
agregar_una_vez(lista_prueba, elemento1)
elemento2 = input('Ingresa un Hola')
agregar_una_vez(lista_prueba, elemento2)
# Entering -2 here would hit the duplicate branch (seeded above).
elemento3 = int(input('Ingresa un -2'))
agregar_una_vez(lista_prueba, elemento3)
print(lista_prueba)
| {
"content_hash": "596580569bb07e4f2f75520452783c99",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 84,
"avg_line_length": 33,
"alnum_prop": 0.6755793226381461,
"repo_name": "maumg1196/PythonRandomExercices",
"id": "c6d0f9ad787baeeb97de0c39cc011bafac0e0eb2",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "16 Agregar una vez/agregar_una_vez.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14685"
}
],
"symlink_target": ""
} |
"""Fichier contenant le type viande."""
from .nourriture import Nourriture
class Viande(Nourriture):
    """Object type: meat ("viande")."""

    nom_type = "viande"

    def __init__(self, cle=""):
        """Build a meat object and set its default nutrition value."""
        Nourriture.__init__(self, cle)
        self.nourrissant = 3
| {
"content_hash": "e9a7ac8b1d2c176b572ca117ac1ed559",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 39,
"avg_line_length": 21.133333333333333,
"alnum_prop": 0.5646687697160884,
"repo_name": "vlegoff/tsunami",
"id": "d4369a8326b5714bcc85280bfd5d3259698546e2",
"size": "1887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/objet/types/viande.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import numpy as np
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import is_integer_dtype, is_list_like, is_scalar
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
import pandas.core.common as com
from pandas.core.frame import _shared_docs
from pandas.core.groupby import Grouper
from pandas.core.index import Index, MultiIndex, _get_objs_combined_axis
from pandas.core.reshape.concat import concat
from pandas.core.reshape.util import cartesian_product
from pandas.core.series import Series
# Note: We need to make sure `frame` is imported before `pivot`, otherwise
# _shared_docs['pivot_table'] will not yet exist. TODO: Fix this dependency
@Substitution("\ndata : DataFrame")
@Appender(_shared_docs["pivot_table"], indents=1)
def pivot_table(
data,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
):
index = _convert_by(index)
columns = _convert_by(columns)
if isinstance(aggfunc, list):
pieces = []
keys = []
for func in aggfunc:
table = pivot_table(
data,
values=values,
index=index,
columns=columns,
fill_value=fill_value,
aggfunc=func,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
pieces.append(table)
keys.append(getattr(func, "__name__", func))
return concat(pieces, keys=keys, axis=1)
keys = index + columns
values_passed = values is not None
if values_passed:
if is_list_like(values):
values_multi = True
values = list(values)
else:
values_multi = False
values = [values]
# GH14938 Make sure value labels are in data
for i in values:
if i not in data:
raise KeyError(i)
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
else:
values = data.columns
for key in keys:
try:
values = values.drop(key)
except (TypeError, ValueError, KeyError):
pass
values = list(values)
grouped = data.groupby(keys, observed=observed)
agged = grouped.agg(aggfunc)
if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
agged = agged.dropna(how="all")
# gh-21133
# we want to down cast if
# the original values are ints
# as we grouped with a NaN value
# and then dropped, coercing to floats
for v in values:
if (
v in data
and is_integer_dtype(data[v])
and v in agged
and not is_integer_dtype(agged[v])
):
agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
if table.index.nlevels > 1:
# Related GH #17123
# If index_names are integers, determine whether the integers refer
# to the level position or name.
index_names = agged.index.names[: len(index)]
to_unstack = []
for i in range(len(index), len(keys)):
name = agged.index.names[i]
if name is None or name in index_names:
to_unstack.append(i)
else:
to_unstack.append(name)
table = agged.unstack(to_unstack)
if not dropna:
from pandas import MultiIndex
if table.index.nlevels > 1:
m = MultiIndex.from_arrays(
cartesian_product(table.index.levels), names=table.index.names
)
table = table.reindex(m, axis=0)
if table.columns.nlevels > 1:
m = MultiIndex.from_arrays(
cartesian_product(table.columns.levels), names=table.columns.names
)
table = table.reindex(m, axis=1)
if isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
if fill_value is not None:
table = table.fillna(value=fill_value, downcast="infer")
if margins:
if dropna:
data = data[data.notna().all(axis=1)]
table = _add_margins(
table,
data,
values,
rows=index,
cols=columns,
aggfunc=aggfunc,
observed=dropna,
margins_name=margins_name,
fill_value=fill_value,
)
# discard the top level
if (
values_passed
and not values_multi
and not table.empty
and (table.columns.nlevels > 1)
):
table = table[values[0]]
if len(index) == 0 and len(columns) > 0:
table = table.T
# GH 15193 Make sure empty columns are removed if dropna=True
if isinstance(table, ABCDataFrame) and dropna:
table = table.dropna(how="all", axis=1)
return table
def _add_margins(
    table,
    data,
    values,
    rows,
    cols,
    aggfunc,
    observed=None,
    margins_name="All",
    fill_value=None,
):
    """Append margin (subtotal) rows/columns plus the grand total to a
    pivoted ``table``.

    Raises ValueError if ``margins_name`` is not a string or collides with
    an existing index/column label.
    """
    if not isinstance(margins_name, str):
        raise ValueError("margins_name argument must be a string")
    msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
    for level in table.index.names:
        if margins_name in table.index.get_level_values(level):
            raise ValueError(msg)
    # Overall aggregate of each value column; supplies the grand-total cell.
    grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)
    # could be passed a Series object with no 'columns'
    if hasattr(table, "columns"):
        for level in table.columns.names[1:]:
            if margins_name in table.columns.get_level_values(level):
                raise ValueError(msg)
    # Label for the margin row; padded with "" on a MultiIndex.
    if len(rows) > 1:
        key = (margins_name,) + ("",) * (len(rows) - 1)
    else:
        key = margins_name
    if not values and isinstance(table, ABCSeries):
        # If there are no values and the table is a series, then there is only
        # one column in the data. Compute grand margin and return it.
        return table.append(Series({key: grand_margin[margins_name]}))
    if values:
        marginal_result_set = _generate_marginal_results(
            table,
            data,
            values,
            rows,
            cols,
            aggfunc,
            observed,
            grand_margin,
            margins_name,
        )
        # A non-tuple result means the helper already produced the final
        # table; return it unchanged.
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set
    else:
        marginal_result_set = _generate_marginal_results_without_values(
            table, data, rows, cols, aggfunc, observed, margins_name
        )
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set
    row_margin = row_margin.reindex(result.columns, fill_value=fill_value)
    # populate grand margin
    for k in margin_keys:
        if isinstance(k, str):
            row_margin[k] = grand_margin[k]
        else:
            # MultiIndex margin key: the value column is its first element.
            row_margin[k] = grand_margin[k[0]]
    from pandas import DataFrame
    margin_dummy = DataFrame(row_margin, columns=[key]).T
    row_names = result.index.names
    try:
        # Cast the margin row column-by-column so result dtypes survive.
        for dtype in set(result.dtypes):
            cols = result.select_dtypes([dtype]).columns
            margin_dummy[cols] = margin_dummy[cols].astype(dtype)
        result = result.append(margin_dummy)
    except TypeError:
        # we cannot reshape, so coerce the axis
        result.index = result.index._to_safe_for_reshape()
        result = result.append(margin_dummy)
    result.index.names = row_names
    return result
def _compute_grand_margin(data, values, aggfunc, margins_name="All"):
if values:
grand_margin = {}
for k, v in data[values].items():
try:
if isinstance(aggfunc, str):
grand_margin[k] = getattr(v, aggfunc)()
elif isinstance(aggfunc, dict):
if isinstance(aggfunc[k], str):
grand_margin[k] = getattr(v, aggfunc[k])()
else:
grand_margin[k] = aggfunc[k](v)
else:
grand_margin[k] = aggfunc(v)
except TypeError:
pass
return grand_margin
else:
return {margins_name: aggfunc(data.index)}
def _generate_marginal_results(
    table, data, values, rows, cols, aggfunc, observed, grand_margin, margins_name="All"
):
    """Build column margins (and the row margin) for a pivot with ``values``.

    Returns either a finished table (when there are columns but no rows),
    or a ``(result, margin_keys, row_margin)`` tuple that ``_add_margins``
    assembles into the final output.
    """
    if len(cols) > 0:
        # need to "interleave" the margins
        table_pieces = []
        margin_keys = []
        def _all_key(key):
            # Margin column label under value column ``key``; padded with
            # "" so it fits the table's MultiIndex column depth.
            return (key, margins_name) + ("",) * (len(cols) - 1)
        if len(rows) > 0:
            # Margin per value column: re-aggregate over the row keys only.
            margin = data[rows + values].groupby(rows, observed=observed).agg(aggfunc)
            cat_axis = 1
            for key, piece in table.groupby(level=0, axis=cat_axis, observed=observed):
                all_key = _all_key(key)
                # we are going to mutate this, so need to copy!
                piece = piece.copy()
                try:
                    piece[all_key] = margin[key]
                except TypeError:
                    # we cannot reshape, so coerce the axis
                    piece.set_axis(
                        piece._get_axis(cat_axis)._to_safe_for_reshape(),
                        axis=cat_axis,
                        inplace=True,
                    )
                    piece[all_key] = margin[key]
                table_pieces.append(piece)
                margin_keys.append(all_key)
        else:
            # No row keys: each value column's margin is its grand total.
            margin = grand_margin
            cat_axis = 0
            for key, piece in table.groupby(level=0, axis=cat_axis, observed=observed):
                all_key = _all_key(key)
                table_pieces.append(piece)
                table_pieces.append(Series(margin[key], index=[all_key]))
                margin_keys.append(all_key)
        result = concat(table_pieces, axis=cat_axis)
        if len(rows) == 0:
            return result
    else:
        result = table
        margin_keys = table.columns
    if len(cols) > 0:
        # Row margin: aggregate over the column keys, then reorder index
        # levels so the margin lines up with the table's column layout.
        row_margin = data[cols + values].groupby(cols, observed=observed).agg(aggfunc)
        row_margin = row_margin.stack()
        # slight hack
        new_order = [len(cols)] + list(range(len(cols)))
        row_margin.index = row_margin.index.reorder_levels(new_order)
    else:
        row_margin = Series(np.nan, index=result.columns)
    return result, margin_keys, row_margin
def _generate_marginal_results_without_values(
    table, data, rows, cols, aggfunc, observed, margins_name="All"
):
    """Build margins for a pivot that has no explicit ``values`` columns.

    Mirrors ``_generate_marginal_results``: returns either a finished
    table or a ``(result, margin_keys, row_margin)`` tuple.
    """
    if len(cols) > 0:
        # need to "interleave" the margins
        margin_keys = []
        def _all_key():
            # Margin column label; padded with "" for a MultiIndex.
            if len(cols) == 1:
                return margins_name
            return (margins_name,) + ("",) * (len(cols) - 1)
        if len(rows) > 0:
            # Aggregate over the row keys; the margin is added in place.
            margin = data[rows].groupby(rows, observed=observed).apply(aggfunc)
            all_key = _all_key()
            table[all_key] = margin
            result = table
            margin_keys.append(all_key)
        else:
            margin = data.groupby(level=0, axis=0, observed=observed).apply(aggfunc)
            all_key = _all_key()
            table[all_key] = margin
            result = table
            margin_keys.append(all_key)
            # No row keys means no separate row margin is needed.
            return result
    else:
        result = table
        margin_keys = table.columns
    if len(cols):
        row_margin = data[cols].groupby(cols, observed=observed).apply(aggfunc)
    else:
        row_margin = Series(np.nan, index=result.columns)
    return result, margin_keys, row_margin
def _convert_by(by):
if by is None:
by = []
elif (
is_scalar(by)
or isinstance(by, (np.ndarray, Index, ABCSeries, Grouper))
or hasattr(by, "__call__")
):
by = [by]
else:
by = list(by)
return by
@Substitution("\ndata : DataFrame")
@Appender(_shared_docs["pivot"], indents=1)
def pivot(data, index=None, columns=None, values=None):
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = data.set_index(cols, append=append)
else:
if index is None:
index = data.index
else:
index = data[index]
index = MultiIndex.from_arrays([index, data[columns]])
if is_list_like(values) and not isinstance(values, tuple):
# Exclude tuple because it is seen as a single column name
indexed = data._constructor(
data[values].values, index=index, columns=values
)
else:
indexed = data._constructor_sliced(data[values].values, index=index)
return indexed.unstack(columns)
def crosstab(
    index,
    columns,
    values=None,
    rownames=None,
    colnames=None,
    aggfunc=None,
    margins=False,
    margins_name="All",
    dropna=True,
    normalize=False,
):
    """
    Compute a simple cross tabulation of two (or more) factors. By default
    computes a frequency table of the factors unless an array of values and an
    aggregation function are passed.

    Parameters
    ----------
    index : array-like, Series, or list of arrays/Series
        Values to group by in the rows.
    columns : array-like, Series, or list of arrays/Series
        Values to group by in the columns.
    values : array-like, optional
        Array of values to aggregate according to the factors.
        Requires `aggfunc` be specified.
    rownames : sequence, default None
        If passed, must match number of row arrays passed.
    colnames : sequence, default None
        If passed, must match number of column arrays passed.
    aggfunc : function, optional
        If specified, requires `values` be specified as well.
    margins : bool, default False
        Add row/column margins (subtotals).
    margins_name : str, default 'All'
        Name of the row/column that will contain the totals
        when margins is True.

        .. versionadded:: 0.21.0

    dropna : bool, default True
        Do not include columns whose entries are all NaN.
    normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
        Normalize by dividing all values by the sum of values.

        - If passed 'all' or `True`, will normalize over all values.
        - If passed 'index' will normalize over each row.
        - If passed 'columns' will normalize over each column.
        - If margins is `True`, will also normalize margin values.

        .. versionadded:: 0.18.1

    Returns
    -------
    DataFrame
        Cross tabulation of the data.

    See Also
    --------
    DataFrame.pivot : Reshape data based on column values.
    pivot_table : Create a pivot table as a DataFrame.

    Notes
    -----
    Any Series passed will have their name attributes used unless row or column
    names for the cross-tabulation are specified.

    Any input passed containing Categorical data will have **all** of its
    categories included in the cross-tabulation, even if the actual data does
    not contain any instances of a particular category.

    In the event that there aren't overlapping indexes an empty DataFrame will
    be returned.

    Examples
    --------
    >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
    ...               "bar", "bar", "foo", "foo", "foo"], dtype=object)
    >>> b = np.array(["one", "one", "one", "two", "one", "one",
    ...               "one", "two", "two", "two", "one"], dtype=object)
    >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
    ...               "shiny", "dull", "shiny", "shiny", "shiny"],
    ...              dtype=object)
    >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
    b   one        two
    c   dull shiny dull shiny
    a
    bar    1     2    1     0
    foo    2     2    1     2

    Here 'c' and 'f' are not represented in the data and will not be
    shown in the output because dropna is True by default. Set
    dropna=False to preserve categories with no data.

    >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
    >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
    >>> pd.crosstab(foo, bar)
    col_0  d  e
    row_0
    a      1  0
    b      0  1
    >>> pd.crosstab(foo, bar, dropna=False)
    col_0  d  e  f
    row_0
    a      1  0  0
    b      0  1  0
    c      0  0  0
    """
    index = com.maybe_make_list(index)
    columns = com.maybe_make_list(columns)
    # Default level names are "row_0", "row_1", ... / "col_0", ... unless
    # the caller provided explicit rownames/colnames.
    rownames = _get_names(index, rownames, prefix="row")
    colnames = _get_names(columns, colnames, prefix="col")
    # Align all factor arrays on their shared index (inner join).
    common_idx = _get_objs_combined_axis(index + columns, intersect=True, sort=False)
    data = {}
    data.update(zip(rownames, index))
    data.update(zip(colnames, columns))
    if values is None and aggfunc is not None:
        raise ValueError("aggfunc cannot be used without values.")
    if values is not None and aggfunc is None:
        raise ValueError("values cannot be used without an aggfunc.")
    from pandas import DataFrame
    df = DataFrame(data, index=common_idx)
    # Delegate to pivot_table on a synthetic "__dummy__" column: count the
    # rows when no values are given, otherwise aggregate ``values`` with
    # the supplied ``aggfunc``.
    if values is None:
        df["__dummy__"] = 0
        kwargs = {"aggfunc": len, "fill_value": 0}
    else:
        df["__dummy__"] = values
        kwargs = {"aggfunc": aggfunc}
    table = df.pivot_table(
        "__dummy__",
        index=rownames,
        columns=colnames,
        margins=margins,
        margins_name=margins_name,
        dropna=dropna,
        **kwargs
    )
    # Post-process
    if normalize is not False:
        table = _normalize(
            table, normalize=normalize, margins=margins, margins_name=margins_name
        )
    return table
def _normalize(table, normalize, margins, margins_name="All"):
    """Normalize a pivoted (crosstab) table by index, columns or grand total.

    Parameters
    ----------
    table : DataFrame
        The pivoted frequency table, possibly carrying margin totals in the
        last row/column.
    normalize : bool, str or int
        'all' (or True), 'index' (or 0) or 'columns' (or 1).
    margins : bool
        Whether ``table`` contains margin totals (named ``margins_name``)
        that must be stripped, normalized separately and re-attached.
    margins_name : str, default "All"
        Name of the margin row/column.

    Raises
    ------
    ValueError
        If ``normalize`` or ``margins`` is not a recognised value, or the
        margin row/column cannot be located in ``table``.
    """
    if not isinstance(normalize, (bool, str)):
        # Accept the axis numbers 0/1 as aliases for 'index'/'columns'.
        axis_subs = {0: "index", 1: "columns"}
        try:
            normalize = axis_subs[normalize]
        except KeyError:
            raise ValueError("Not a valid normalize argument")
    if margins is False:
        # Actual Normalizations: map each mode to its normalizing callable.
        normalizers = {
            "all": lambda x: x / x.sum(axis=1).sum(axis=0),
            "columns": lambda x: x / x.sum(),
            "index": lambda x: x.div(x.sum(axis=1), axis=0),
        }
        # True is an alias for normalizing over the grand total.
        normalizers[True] = normalizers["all"]
        try:
            f = normalizers[normalize]
        except KeyError:
            raise ValueError("Not a valid normalize argument")
        table = f(table)
        # 0/0 divisions yield NaN; report those cells as 0 instead.
        table = table.fillna(0)
    elif margins is True:
        # keep index and column of pivoted table
        table_index = table.index
        table_columns = table.columns
        # check if margin name is in (for MI cases) or equal to last
        # index/column and save the column and index margin
        # NOTE(review): '|' (bitwise or) is applied to two plain bools; the
        # row check uses 'not in' (margin name may sit inside a MultiIndex
        # tuple) while the column check uses '!=' — confirm this asymmetry
        # is intended.
        if (margins_name not in table.iloc[-1, :].name) | (
            margins_name != table.iloc[:, -1].name
        ):
            raise ValueError("{} not in pivoted DataFrame".format(margins_name))
        column_margin = table.iloc[:, -1]
        column_margin = table.iloc[:-1, -1]
        index_margin = table.iloc[-1, :-1]
        # keep the core table
        table = table.iloc[:-1, :-1]
        # Normalize core
        table = _normalize(table, normalize=normalize, margins=False)
        # Fix Margins
        if normalize == "columns":
            column_margin = column_margin / column_margin.sum()
            table = concat([table, column_margin], axis=1)
            table = table.fillna(0)
            table.columns = table_columns
        elif normalize == "index":
            index_margin = index_margin / index_margin.sum()
            table = table.append(index_margin)
            table = table.fillna(0)
            table.index = table_index
        elif normalize == "all" or normalize is True:
            column_margin = column_margin / column_margin.sum()
            index_margin = index_margin / index_margin.sum()
            # The grand-total cell of a fully-normalized table is always 1.
            index_margin.loc[margins_name] = 1
            table = concat([table, column_margin], axis=1)
            table = table.append(index_margin)
            table = table.fillna(0)
            table.index = table_index
            table.columns = table_columns
        else:
            raise ValueError("Not a valid normalize argument")
    else:
        raise ValueError("Not a valid margins argument")
    return table
def _get_names(arrs, names, prefix="row"):
if names is None:
names = []
for i, arr in enumerate(arrs):
if isinstance(arr, ABCSeries) and arr.name is not None:
names.append(arr.name)
else:
names.append("{prefix}_{i}".format(prefix=prefix, i=i))
else:
if len(names) != len(arrs):
raise AssertionError("arrays and names must have the same length")
if not isinstance(names, list):
names = list(names)
return names
| {
"content_hash": "4bf97b2fbddc7e1b1a92a3e7708696aa",
"timestamp": "",
"source": "github",
"line_count": 684,
"max_line_length": 88,
"avg_line_length": 31.480994152046783,
"alnum_prop": 0.5675010449078159,
"repo_name": "kushalbhola/MyStuff",
"id": "3d93042486c8a37815b5ee69b6ac05920e27bbb5",
"size": "21533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/pandas/core/reshape/pivot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
} |
class Event(object):
    """Value object handed to listeners.

    Carries a payload dictionary plus a propagation flag that lets any
    listener halt further dispatching of the same event.
    """

    _propagation_stopped = False

    def __init__(self, data=None):
        # A falsy payload (None or empty) is replaced by a fresh dict.
        self.data = data or {}

    def stop_propagation(self):
        """Mark the event so the dispatcher stops notifying listeners."""
        self._propagation_stopped = True

    def is_propagation_stop(self):
        """Return True once stop_propagation() has been called."""
        return self._propagation_stopped

    def get(self, name):
        """Fetch payload entry ``name`` (raises KeyError when absent)."""
        return self.data[name]

    def set(self, name, value):
        """Store ``value`` under ``name`` in the payload."""
        self.data[name] = value

    def has(self, name):
        """Return True when ``name`` exists in the payload."""
        return name in self.data
class Dispatcher(object):
    """Simple synchronous event dispatcher.

    Listeners are registered per event name together with a priority;
    higher priorities run first.  A listener may call
    ``event.stop_propagation()`` to prevent the remaining listeners from
    running.
    """

    def __init__(self, logger=None):
        # name -> list of (callable, priority) tuples, kept sorted by
        # priority (descending) so dispatch order is deterministic.
        self.listeners = {}
        self.logger = logger

    def dispatch(self, name, event=None):
        """Run every listener registered for ``name``; return the event.

        ``event`` may be a plain dict (wrapped into an Event), an Event
        instance, or None (a fresh empty Event is created).
        """
        if isinstance(event, dict):
            event = Event(event)

        event = event or Event()

        if self.logger:
            self.logger.debug("event.dispatch: %s" % name)

        if name not in self.listeners:
            return event

        for listener in self.get_listeners(name):
            if self.logger:
                self.logger.debug("event.dispatch: %s to %s" % (name, listener))

            listener(event)

            # BUG FIX: the original called event.stop_propagation() here,
            # which *sets* the flag and returns None (falsy), so propagation
            # was never actually stopped.  Query the flag instead.
            if event.is_propagation_stop():
                if self.logger:
                    self.logger.debug("event.dispatch: %s is stopped" % name)
                break

        return event

    def get_listeners(self, name):
        """Return the callables registered for ``name`` in dispatch order."""
        return [listener for listener, _priority in self.listeners[name]]

    def add_listener(self, name, listener, priority=0):
        """Register ``listener`` for ``name``; higher priority runs first."""
        if name not in self.listeners:
            self.listeners[name] = []

        self.listeners[name].append((listener, priority))

        # Re-sort so the highest priority comes first; list.sort is stable,
        # so equal priorities keep their registration order.
        self.listeners[name].sort(key=lambda entry: entry[1], reverse=True)

    def remove_listener(self, name, listener):
        """Unregister every occurrence of ``listener`` for ``name``.

        BUG FIX: entries are (listener, priority) tuples, so the original
        comparison ``item != listener`` never matched and nothing was ever
        removed; compare against the stored callable instead.
        """
        if name not in self.listeners:
            return

        self.listeners[name] = [
            item for item in self.listeners[name] if item[0] != listener
        ]

    def remove_listeners(self, name):
        """Drop every listener registered for ``name``."""
        if name in self.listeners:
            self.listeners[name] = []
| {
"content_hash": "e06dd6c49884c2a232a9fce1f195c665",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 90,
"avg_line_length": 26.860759493670887,
"alnum_prop": 0.5711592836946278,
"repo_name": "rande/python-simple-ioc",
"id": "e261b84759c1414e1ffb22d7bc9ddc824046f65c",
"size": "2724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ioc/event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "243"
},
{
"name": "Python",
"bytes": "128014"
}
],
"symlink_target": ""
} |
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTShareEntryParams(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This model declares no enum-constrained attributes and no validations.
    allowed_values = {}

    validations = {}

    # None: no additional (undeclared) property types are described here;
    # presumably ModelNormal decides how unknown keys are treated — see the
    # discard_unknown_keys check in __init__.
    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # All six fields are optional scalars (see __init__ docstring).
        return {
            "application_id": (str,),  # noqa: E501
            "company_id": (str,),  # noqa: E501
            "email": (str,),  # noqa: E501
            "entry_type": (int,),  # noqa: E501
            "team_id": (str,),  # noqa: E501
            "user_id": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # No polymorphic discriminator for this model.
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        "application_id": "applicationId",  # noqa: E501
        "company_id": "companyId",  # noqa: E501
        "email": "email",  # noqa: E501
        "entry_type": "entryType",  # noqa: E501
        "team_id": "teamId",  # noqa: E501
        "user_id": "userId",  # noqa: E501
    }

    @staticmethod
    def _composed_schemas():
        # Not a composed (allOf/oneOf/anyOf) schema.
        return None

    # Internal bookkeeping attributes that must never be treated as model
    # properties by the serialization machinery.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_share_entry_params.BTShareEntryParams - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            application_id (str): [optional]  # noqa: E501
            company_id (str): [optional]  # noqa: E501
            email (str): [optional]  # noqa: E501
            entry_type (int): [optional]  # noqa: E501
            team_id (str): [optional]  # noqa: E501
            user_id (str): [optional]  # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        # Assign every supplied keyword via setattr so that ModelNormal's
        # descriptor machinery can validate/convert each value.
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable: configuration asked us to silently drop
                # keys that are not part of the declared schema.
                continue
            setattr(self, var_name, var_value)
| {
"content_hash": "e2e871e0e102f73474435d3e8897109d",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 80,
"avg_line_length": 34.02564102564103,
"alnum_prop": 0.5619819140919367,
"repo_name": "onshape-public/onshape-clients",
"id": "b800dcb60b64724beabae59e1fcde48013dd6c55",
"size": "5325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/onshape_client/oas/models/bt_share_entry_params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Go",
"bytes": "59674"
},
{
"name": "HTML",
"bytes": "3851790"
},
{
"name": "JavaScript",
"bytes": "2217"
},
{
"name": "Makefile",
"bytes": "559"
},
{
"name": "Python",
"bytes": "7560009"
},
{
"name": "Shell",
"bytes": "3475"
},
{
"name": "TypeScript",
"bytes": "1412661"
}
],
"symlink_target": ""
} |
"""
This object contains a list of PolarizedPhoton objects, characterized by energy, direction vector and Stokes vector.
This object is used as input to and output from the passive crystal widget.
"""
import numpy
from crystalpy.util.PhotonBunch import PhotonBunch
class PolarizedPhotonBunch(PhotonBunch):
    """A collection of PolarizedPhoton objects making up the photon beam.

    Extends PhotonBunch with the Stokes-vector / polarization information
    carried by each photon.
    """

    def __init__(self, polarized_photons=None):
        """
        :param polarized_photons: bunch of PolarizedPhoton objects.
        :type polarized_photons: list(PolarizedPhoton, PolarizedPhoton, ...)
        """
        # FIX: PEP 8 (E711) — compare with "is None" rather than "== None";
        # this also avoids triggering a custom __eq__ on the argument.
        # NOTE(review): PhotonBunch.__init__ is not called here — confirm the
        # base class also stores its photons in `polarized_photon_bunch`.
        if polarized_photons is None:
            self.polarized_photon_bunch = []
        else:
            self.polarized_photon_bunch = polarized_photons

    def toDictionary(self):
        """Return a dictionary describing the bunch.

        Extends the parent dictionary with the four Stokes parameters
        (keys "s0".."s3", each an array over the photons) and the circular
        polarization degree (key "polarization degree").
        """
        array_dict = PhotonBunch.toDictionary(self)

        stokes = numpy.zeros([4, len(self)])
        polarization_degrees = numpy.zeros(len(self))

        for i, polarized_photon in enumerate(self):
            stokes[0, i] = polarized_photon.stokesVector().s0
            stokes[1, i] = polarized_photon.stokesVector().s1
            stokes[2, i] = polarized_photon.stokesVector().s2
            stokes[3, i] = polarized_photon.stokesVector().s3
            polarization_degrees[i] = polarized_photon.circularPolarizationDegree()

        array_dict["s0"] = stokes[0, :]
        array_dict["s1"] = stokes[1, :]
        array_dict["s2"] = stokes[2, :]
        array_dict["s3"] = stokes[3, :]
        array_dict["polarization degree"] = polarization_degrees

        return array_dict

    def toString(self):
        """
        :return: string containing the parameters characterizing each photon in the bunch.
        """
        bunch_string = str()
        for i in range(self.getNumberOfPhotons()):
            photon = self.getPhotonIndex(i)
            string_to_attach = str(photon.energy()) + " " + \
                               photon.unitDirectionVector().toString() + "\n"
            bunch_string += string_to_attach
        return bunch_string
"content_hash": "dd58bac91ef082d1dae07c601e7bab37",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 116,
"avg_line_length": 35.47540983606557,
"alnum_prop": 0.6182994454713494,
"repo_name": "edocappelli/crystalpy",
"id": "3645202955243f58d53434cb65d6c472dd981f76",
"size": "2164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crystalpy/util/PolarizedPhotonBunch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "320393"
}
],
"symlink_target": ""
} |
from datab import database_binding
from router import router_module
'''
################################################################################
Server-Client Text Protocol (SCTP) — server side.

Serves as an intermediate layer between the core routing engine and the
transmitter. The result of the calculation produced by the router module is
incomplete: we need to attach more information about the route, e.g. the
price, time and distance between any two adjacent nodes. This module extends
the router result before it is sent to the client, adding those redundancies.

Two additional commands are also provided: query and query_all.
The query command looks up the (time, price) information between two nodes.
The query_all command parses a path list and returns the information between
each pair of consecutive nodes along the route. Enjoy.
################################################################################
'''
class scprotocol():
    """Server-side SCTP layer.

    Enriches raw router results with per-hop time and price data before
    they are transmitted to the client.
    """

    # Snapshots of the raw transport databases.
    data_fl = database_binding.data_flight
    data_bus = database_binding.data_bus
    data_tr = database_binding.data_train

    def __init__(self, router, db):
        self.data_all = db.data_all
        self.router_binding = router

    def dist(self, v1, v2, mode, mean):
        """Look up one metric ('time'/'price'/'distance') between v1 and v2."""
        # Translate the numeric transport code into its database key.
        mean_names = {0: 'flight', 1: 'train', 2: 'bus'}
        return self.data_all[mean_names[mean]][mode][v1][v2][0]

    def tdist(self, v1, v2, mean):
        """Travel time between v1 and v2 via the given transport."""
        return self.dist(v1, v2, 'time', mean)

    def cdist(self, v1, v2, mean):
        """Ticket price between v1 and v2 via the given transport."""
        return self.dist(v1, v2, 'price', mean)

    def ddist(self, v1, v2, mean):
        """Distance between v1 and v2 via the given transport."""
        return self.dist(v1, v2, 'distance', mean)

    def interpreter(cmd):
        # Stub: command parsing is not implemented yet.
        # NOTE(review): signature lacks `self` — confirm how this is invoked.
        pass

    # mtp & mcp return a triple: (path, total time, total cost).

    def mtp(self, id_src, id_dest):
        """Minimal-time path from id_src to id_dest."""
        raw = self.router_binding.minimal_time_path(id_src, id_dest)
        path_matrix = raw[0]
        mintime = raw[1]
        cost_total = 0
        # Accumulate the ticket price of every hop along the route; each
        # hop is (node, mean-of-transport).
        for hop, nxt in zip(path_matrix, path_matrix[1:]):
            cost_total += self.cdist(hop[0], nxt[0], hop[1])
        return (path_matrix, mintime, cost_total)

    def mcp(self, id_src, id_dest):
        """Minimal-cost path from id_src to id_dest."""
        raw = self.router_binding.minimal_cost_path(id_src, id_dest)
        path_matrix = raw[0]
        mincost = raw[1]
        time_total = 0
        # Accumulate the travel time of every hop along the route.
        for hop, nxt in zip(path_matrix, path_matrix[1:]):
            time_total += self.tdist(hop[0], nxt[0], hop[1])
        return (path_matrix, time_total, mincost)

    def query(self, from_node, to_node, mean_of_transportation):
        """Return the (time, price) pair between two adjacent nodes."""
        return (
            self.tdist(from_node, to_node, mean_of_transportation),
            self.cdist(from_node, to_node, mean_of_transportation),
        )

    def query_all(self, *path_list):
        """Expand a path into per-hop [src, dst, mean, time, price] rows."""
        rows = []
        for hop, nxt in zip(path_list, path_list[1:]):
            metrics = self.query(hop[0], nxt[0], hop[1])
            rows.append([hop[0], nxt[0], hop[1], metrics[0], metrics[1]])
        return rows
| {
"content_hash": "f9ea2115198995eb2c9ed956b2ad25e0",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 82,
"avg_line_length": 37.375,
"alnum_prop": 0.572463768115942,
"repo_name": "niwtr/map-walker",
"id": "68bea0e44d09c61f985c17042b4e997be63144e7",
"size": "3588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server/textprotocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1402"
}
],
"symlink_target": ""
} |
from gzip import open as gzip_open
from shutil import copyfileobj, move
def gzip_decompress_file(gzip_file_path):
    """Decompress ``<path>.gz`` into ``<path>`` and return the new path.

    FIX: the archive is now read and written in *binary* mode; the original
    text-mode ("rt"/"wt") version raised UnicodeDecodeError on any payload
    that was not valid text in the default encoding and silently applied
    newline translation. Output is first streamed to a ``.temporary`` file
    and only then renamed into place, so a partially written file never
    shadows the final name.

    :param gzip_file_path: path to a file whose name ends in ``.gz``
    :return: path of the decompressed file (the input path minus ``.gz``)
    """
    with gzip_open(gzip_file_path, mode="rb") as gzip_file:
        file_path = gzip_file_path[: -len(".gz")]
        file_path_temporary = "{}.temporary".format(file_path)
        # Stream in chunks (copyfileobj) to keep memory flat on large files.
        with open(file_path_temporary, mode="wb") as file_temporary:
            copyfileobj(gzip_file, file_temporary)
        move(file_path_temporary, file_path)
    return file_path
| {
"content_hash": "087abe07aa72ac386161ff5aef5ed2ee",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 25.157894736842106,
"alnum_prop": 0.6652719665271967,
"repo_name": "UCSD-CCAL/ccal",
"id": "a03b330acf2eab6fc74a1b6b6170f8fe8dc6971f",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccal/gzip_decompress_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "20830"
},
{
"name": "Python",
"bytes": "294577"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.