text stringlengths 4 1.02M | meta dict |
|---|---|
from dogetip import app

# Launch the development server, taking every setting from the app config.
app.run(
    host=app.config['HOST'],
    port=app.config['PORT'],
    debug=app.config['DEBUG'],
)
| {
"content_hash": "c64d3e7b0a76491d793c49b629cf989c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 19.5,
"alnum_prop": 0.6752136752136753,
"repo_name": "mmattax/hubot-dogetip",
"id": "8bbb892ab63d3db764d3b9669510a45440617c8a",
"size": "135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1686"
}
],
"symlink_target": ""
} |
import re
import hashlib
import time
from ct.crypto import cert
from ct.proto import certificate_pb2
def from_cert(certificate, observations=None):
    """Pulls out interesting fields from certificate, so format of data will
    be similar in every database implementation.

    Args:
        certificate: a ct.crypto.cert certificate object to describe.
        observations: optional iterable of observation objects whose
            description/reason/details are attached to the result.

    Returns:
        a certificate_pb2.X509Description protobuf.
    """
    # Fix: the previous signature used a mutable default (observations=[]),
    # which is shared between calls; normalize a None default instead.
    if observations is None:
        observations = []
    proto = certificate_pb2.X509Description()
    proto.der = certificate.to_der()
    # Each field is extracted best-effort: a CertificateError in any one
    # section leaves that section empty rather than aborting the whole
    # description.
    try:
        for sub in [(type_.short_name,
                     to_unicode('.'.join(process_name(value.human_readable()))))
                    for type_, value in certificate.subject()]:
            proto_sub = proto.subject.add()
            proto_sub.type, proto_sub.value = sub
    except cert.CertificateError:
        pass
    try:
        for iss in [(type_.short_name,
                     to_unicode('.'.join(process_name(value.human_readable()))))
                    for type_, value in certificate.issuer()]:
            proto_iss = proto.issuer.add()
            proto_iss.type, proto_iss.value = iss
    except cert.CertificateError:
        pass
    try:
        for alt in certificate.subject_alternative_names():
            proto_alt = proto.subject_alternative_names.add()
            proto_alt.type, proto_alt.value = (
                alt.component_key(),
                to_unicode('.'.join(process_name(
                    alt.component_value().human_readable()))))
    except cert.CertificateError:
        pass
    try:
        proto.version = str(certificate.version())
    except cert.CertificateError:
        pass
    try:
        # Serial numbers are normalized to an uppercase hex string with the
        # ':' separators removed.
        proto.serial_number = str(certificate.serial_number().human_readable()
                                  .upper().replace(':', ''))
    except cert.CertificateError:
        pass
    try:
        # Validity bounds are stored in milliseconds since the epoch.
        proto.validity.not_before, proto.validity.not_after = (
            1000 * int(time.mktime(certificate.not_before())),
            1000 * int(time.mktime(certificate.not_after())))
    except cert.CertificateError:
        pass
    proto.sha256_hash = hashlib.sha256(proto.der).digest()
    for observation in observations:
        proto_obs = proto.observations.add()
        if observation.description:
            proto_obs.description = observation.description
        if observation.reason:
            proto_obs.reason = observation.reason
        proto_obs.details = observation.details_to_proto()
    return proto
def to_unicode(str_):
    """Decode a UTF-8 byte string to `unicode` (Python 2), replacing any
    invalid byte sequences with U+FFFD instead of raising."""
    return unicode(str_, 'utf-8', 'replace')
def process_name(subject, reverse=True):
    """Normalize a subject/issuer name for indexing.

    Names that look like DNS names — more than one label, each label made
    only of lowercase letters, digits, hyphens and wildcards (RFC 2818) —
    are lowercased and split into labels, reversed by default so that
    "mail.example.com" becomes ["com", "example", "mail"].  Anything else
    is returned unchanged as a single-element list.
    """
    # RFCs for DNS names: RFC 1034 (sect. 3.5), RFC 1123 (sect. 2.1);
    # for common names: RFC 5280.  Full RFC compliance is deliberately not
    # enforced here (leading hyphens, multi-wildcard names, etc. are all
    # accepted); only the common indexing case needs to work.
    bad_char = re.compile(r"[^a-z\d\-\*]")
    labels = subject.lower().split(".")
    looks_like_dns = len(labels) > 1 and all(
        label and not bad_char.search(label) for label in labels)
    if not looks_like_dns:
        # e.g. ["John Smith"], ["Trustworthy Certificate Authority"],
        # ["google.com\x00"].
        # TODO(ekasper): figure out what to do (use stringprep as specified
        # by RFC 5280?) to properly handle non-letter-digit-hyphen names.
        return [subject]
    # e.g. ["com", "example", "*"], ["com", "example", "mail"].
    return labels[::-1] if reverse else labels
| {
"content_hash": "306973abfccbecc42b0ac5e54f5ebcc8",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 80,
"avg_line_length": 36.46938775510204,
"alnum_prop": 0.6015668718522663,
"repo_name": "rep/certificate-transparency",
"id": "bc3ab0d122889e705fd991e6fcd7624ea12d2293",
"size": "3574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ct/client/db/cert_desc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2898"
},
{
"name": "C++",
"bytes": "1228105"
},
{
"name": "Go",
"bytes": "377770"
},
{
"name": "HTML",
"bytes": "1195"
},
{
"name": "Java",
"bytes": "106756"
},
{
"name": "Makefile",
"bytes": "3207"
},
{
"name": "Protocol Buffer",
"bytes": "23387"
},
{
"name": "Python",
"bytes": "685642"
},
{
"name": "Shell",
"bytes": "41289"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hashlib
import itertools
import os
import sys
# configuration: a list of either strings or 2-tuples of strings
# a single string represents a static grpc_mdstr
# a 2-tuple represents a static grpc_mdelem (and appropriate grpc_mdstrs will
# also be created)
# Static metadata configuration: a bare string creates a static grpc_mdstr;
# a (key, value) 2-tuple creates a static grpc_mdelem (and the grpc_mdstrs
# for both halves).
CONFIG = [
    'grpc-timeout',
    'grpc-internal-encoding-request',
    'grpc-payload-bin',
    ':path',
    'grpc-encoding',
    'grpc-accept-encoding',
    'user-agent',
    ':authority',
    'host',
    'grpc-message',
    'grpc-status',
    'grpc-tracing-bin',
    'grpc-census-bin',
    '',
    ('grpc-status', '0'),
    ('grpc-status', '1'),
    ('grpc-status', '2'),
    ('grpc-encoding', 'identity'),
    ('grpc-encoding', 'gzip'),
    ('grpc-encoding', 'deflate'),
    ('te', 'trailers'),
    ('content-type', 'application/grpc'),
    (':method', 'POST'),
    (':status', '200'),
    (':status', '404'),
    (':scheme', 'http'),
    (':scheme', 'https'),
    (':scheme', 'grpc'),
    (':authority', ''),
    (':method', 'GET'),
    (':method', 'PUT'),
    (':path', '/'),
    (':path', '/index.html'),
    (':status', '204'),
    (':status', '206'),
    (':status', '304'),
    (':status', '400'),
    (':status', '500'),
    ('accept-charset', ''),
    ('accept-encoding', ''),
    ('accept-encoding', 'gzip, deflate'),
    ('accept-language', ''),
    ('accept-ranges', ''),
    ('accept', ''),
    ('access-control-allow-origin', ''),
    ('age', ''),
    ('allow', ''),
    ('authorization', ''),
    ('cache-control', ''),
    ('content-disposition', ''),
    ('content-encoding', ''),
    ('content-language', ''),
    ('content-length', ''),
    ('content-location', ''),
    ('content-range', ''),
    ('content-type', ''),
    ('cookie', ''),
    ('date', ''),
    ('etag', ''),
    ('expect', ''),
    ('expires', ''),
    ('from', ''),
    ('host', ''),
    ('if-match', ''),
    ('if-modified-since', ''),
    ('if-none-match', ''),
    ('if-range', ''),
    ('if-unmodified-since', ''),
    ('last-modified', ''),
    ('load-reporting-initial', ''),
    ('load-reporting-trailing', ''),
    ('link', ''),
    ('location', ''),
    ('max-forwards', ''),
    ('proxy-authenticate', ''),
    ('proxy-authorization', ''),
    ('range', ''),
    ('referer', ''),
    ('refresh', ''),
    ('retry-after', ''),
    ('server', ''),
    ('set-cookie', ''),
    ('strict-transport-security', ''),
    ('transfer-encoding', ''),
    ('user-agent', ''),
    ('vary', ''),
    ('via', ''),
    ('www-authenticate', ''),
]
# Supported compression algorithms; bit i of an accept-encoding mask below
# corresponds to COMPRESSION_ALGORITHMS[i].
COMPRESSION_ALGORITHMS = [
    'identity',
    'deflate',
    'gzip',
]
# utility: mangle the name of a config
def mangle(elem):
    """Mangle a config entry (string or 2-tuple) into a C identifier."""
    # Character translations applied before lowercasing the rest.
    char_map = {
        '-': '_',
        ':': '',
        '/': 'slash',
        '.': 'dot',
        ',': 'comma',
        ' ': '_',
    }

    def clean(text):
        # Empty strings get a placeholder name; otherwise build the
        # identifier character by character, collapsing repeated
        # underscores and wrapping multi-character replacements in them.
        if not text:
            return 'empty'
        out = ''
        for ch in text:
            piece = char_map.get(ch, ch.lower())
            if not piece:
                continue
            prev_is_underscore = out.endswith('_') if out else True
            if prev_is_underscore and piece == '_':
                continue
            if len(piece) > 1:
                if not prev_is_underscore:
                    out += '_'
                out += piece + '_'
            else:
                out += piece
        if out.endswith('_'):
            out = out[:-1]
        return out

    if isinstance(elem, tuple):
        return 'grpc_mdelem_%s_%s' % (clean(elem[0]), clean(elem[1]))
    return 'grpc_mdstr_%s' % clean(elem)
# utility: generate some hash value for a string
def fake_hash(elem):
    """Return a short stable hash: the first 8 hex digits of MD5(elem)."""
    return hashlib.md5(elem).hexdigest()[:8]
# utility: print a big comment block into a set of files
def put_banner(files, banner):
    """Write `banner` (a list of lines) as one C block comment to each file."""
    for f in files:
        print >>f, '/*'
        for line in banner:
            print >>f, ' * %s' % line
        print >>f, ' */'
        # Trailing blank line after the comment block.
        print >>f
# build a list of all the strings we need
all_strs = set()
all_elems = set()
static_userdata = {}
for elem in CONFIG:
if isinstance(elem, tuple):
all_strs.add(elem[0])
all_strs.add(elem[1])
all_elems.add(elem)
else:
all_strs.add(elem)
compression_elems = []
for mask in range(1, 1<<len(COMPRESSION_ALGORITHMS)):
val = ','.join(COMPRESSION_ALGORITHMS[alg]
for alg in range(0, len(COMPRESSION_ALGORITHMS))
if (1 << alg) & mask)
elem = ('grpc-accept-encoding', val)
all_strs.add(val)
all_elems.add(elem)
compression_elems.append(elem)
static_userdata[elem] = 1 + (mask | 1)
all_strs = sorted(list(all_strs), key=mangle)
all_elems = sorted(list(all_elems), key=mangle)
# output configuration
args = sys.argv[1:]
H = None
C = None
D = None
if args:
if 'header' in args:
H = sys.stdout
else:
H = open('/dev/null', 'w')
if 'source' in args:
C = sys.stdout
else:
C = open('/dev/null', 'w')
if 'dictionary' in args:
D = sys.stdout
else:
D = open('/dev/null', 'w')
else:
H = open(os.path.join(
os.path.dirname(sys.argv[0]), '../../../src/core/lib/transport/static_metadata.h'), 'w')
C = open(os.path.join(
os.path.dirname(sys.argv[0]), '../../../src/core/lib/transport/static_metadata.c'), 'w')
D = open(os.path.join(
os.path.dirname(sys.argv[0]), '../../../test/core/end2end/fuzzers/hpack.dictionary'), 'w')
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != '#': break
for line in my_source:
if line[0] == '#':
copyright.append(line)
break
for line in my_source:
if line[0] != '#':
break
copyright.append(line)
put_banner([H,C], [line[2:].rstrip() for line in copyright])
hex_bytes = [ord(c) for c in "abcdefABCDEF0123456789"]
def esc_dict(line):
    """Render a sequence of byte values as a double-quoted C string literal."""
    pieces = ['"']
    for byte in line:
        if 32 <= byte < 127 and byte != ord('"'):
            # Printable ASCII (other than the quote) passes through as-is.
            pieces.append(chr(byte))
        elif byte == ord('"'):
            pieces.append("\\\"")
        else:
            pieces.append("\\x%02X" % byte)
    pieces.append('"')
    return ''.join(pieces)
# Warn readers that both generated files are produced by this script.
put_banner([H,C],
"""WARNING: Auto-generated code.
To make changes to this file, change
tools/codegen/core/gen_static_metadata.py, and then re-run it.
See metadata.h for an explanation of the interface here, and metadata.c for
an explanation of what's going on.
""".splitlines())
# Header guard and includes.
print >>H, '#ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
print >>H, '#define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
print >>H
print >>H, '#include "src/core/lib/transport/metadata.h"'
print >>H
print >>C, '#include "src/core/lib/transport/static_metadata.h"'
print >>C
# Static string table: one #define per string, indexing into the table.
print >>H, '#define GRPC_STATIC_MDSTR_COUNT %d' % len(all_strs)
print >>H, 'extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];'
for i, elem in enumerate(all_strs):
    print >>H, '/* "%s" */' % elem
    print >>H, '#define %s (&grpc_static_mdstr_table[%d])' % (mangle(elem).upper(), i)
print >>H
print >>C, 'grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];'
print >>C
# Fuzzing dictionary: length-prefixed strings and key/value element pairs.
print >>D, '# hpack fuzzing dictionary'
for i, elem in enumerate(all_strs):
    print >>D, '%s' % (esc_dict([len(elem)] + [ord(c) for c in elem]))
for i, elem in enumerate(all_elems):
    print >>D, '%s' % (esc_dict([0, len(elem[0])] + [ord(c) for c in elem[0]] +
                                [len(elem[1])] + [ord(c) for c in elem[1]]))
# Static element table plus per-element user data.
print >>H, '#define GRPC_STATIC_MDELEM_COUNT %d' % len(all_elems)
print >>H, 'extern grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];'
print >>H, 'extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];'
for i, elem in enumerate(all_elems):
    print >>H, '/* "%s": "%s" */' % elem
    print >>H, '#define %s (&grpc_static_mdelem_table[%d])' % (mangle(elem).upper(), i)
print >>H
print >>C, 'grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];'
print >>C, 'uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {'
print >>C, ' %s' % ','.join('%d' % static_userdata.get(elem, 0) for elem in all_elems)
print >>C, '};'
print >>C
def str_idx(s):
    """Index of s in all_strs (implicitly None when absent)."""
    for idx, candidate in enumerate(all_strs):
        if candidate == s:
            return idx
def md_idx(m):
    """Index of element m in all_elems (implicitly None when absent)."""
    for idx, candidate in enumerate(all_elems):
        if candidate == m:
            return idx
# For each element, the string-table indices of its key and value.
print >>H, 'extern const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT*2];'
print >>C, 'const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT*2] = {'
print >>C, ','.join('%d' % str_idx(x) for x in itertools.chain.from_iterable([a,b] for a, b in all_elems))
print >>C, '};'
print >>C
# The raw C string literals backing the static string table.
print >>H, 'extern const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT];'
print >>C, 'const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {'
print >>C, '%s' % ',\n'.join(' "%s"' % s for s in all_strs)
print >>C, '};'
print >>C
# Lookup from compression-algorithm bitmask to the matching
# grpc-accept-encoding element (index 0 is the empty mask).
print >>H, 'extern const uint8_t grpc_static_accept_encoding_metadata[%d];' % (1 << len(COMPRESSION_ALGORITHMS))
print >>C, 'const uint8_t grpc_static_accept_encoding_metadata[%d] = {' % (1 << len(COMPRESSION_ALGORITHMS))
print >>C, '0,%s' % ','.join('%d' % md_idx(elem) for elem in compression_elems)
print >>C, '};'
print >>C
print >>H, '#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) (&grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]])'
print >>H, '#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */'
H.close()
C.close()
| {
"content_hash": "96f612094766e6fda3b28bfbc525af7c",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 143,
"avg_line_length": 30.657971014492755,
"alnum_prop": 0.6019665311525008,
"repo_name": "soltanmm/grpc",
"id": "2a16baa1b975f31d0a35e319ebca3b093c9296b6",
"size": "10577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/codegen/core/gen_static_metadata.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "27083"
},
{
"name": "C",
"bytes": "5708638"
},
{
"name": "C#",
"bytes": "1238502"
},
{
"name": "C++",
"bytes": "1893110"
},
{
"name": "CMake",
"bytes": "72370"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "338059"
},
{
"name": "M4",
"bytes": "37018"
},
{
"name": "Makefile",
"bytes": "673405"
},
{
"name": "Objective-C",
"bytes": "285445"
},
{
"name": "PHP",
"bytes": "148571"
},
{
"name": "Protocol Buffer",
"bytes": "118490"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1158876"
},
{
"name": "Ruby",
"bytes": "580265"
},
{
"name": "Shell",
"bytes": "48311"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
import tornado.ioloop
import tornado.web
import tornado.escape
import sys
class LogHandler(tornado.web.RequestHandler):
    """Accepts a JSON POST, logs it to stderr, and acknowledges it."""

    def post(self, device_id):
        payload = tornado.escape.json_decode(self.request.body)
        # Log both headers and body to stderr for debugging.
        sys.stderr.write("Received headers: %s\n"%str(self.request.headers))
        sys.stderr.write("Received payload: %s\n"%str(payload))
        self.finish({'status': 'ok'})
def make_app():
    """Build the Tornado application with the temperature logging route."""
    routes = [(r"/temperature/([^/]+)", LogHandler)]
    return tornado.web.Application(routes)
# Serve forever on port 8889 when run as a script.
if __name__=='__main__':
    app = make_app()
    app.listen(8889)
    tornado.ioloop.IOLoop.current().start()
| {
"content_hash": "18f986c0a7b56616a6568e261f3eeaec",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 30.15,
"alnum_prop": 0.6533996683250415,
"repo_name": "hectortosa/py-temperature-recorder",
"id": "d4d8dbef7b873e5e384b137a7c07f273b92dd324",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "httpserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6019"
}
],
"symlink_target": ""
} |
from wsgiref.handlers import format_date_time
from datetime import datetime
from time import mktime
from requests.auth import AuthBase
from .sign import Signer
class HTTPSignatureAuth(AuthBase):
    """requests auth hook that signs requests with the http-signature scheme.

    https://github.com/joyent/node-http-signature/blob/master/http_signing.md

    key_id   -- mandatory label telling the server which secret to use.
    secret   -- PEM filename for rsa algorithms, password string for hmac.
    algorithm -- one of the six algorithms the scheme specifies.
    headers  -- list of http headers included in the signing string;
                defaults to "Date" alone.
    """

    def __init__(self, key_id='', secret='', algorithm='rsa-sha256', headers=None, allow_agent=False):
        self.signer = Signer(secret=secret, algorithm=algorithm, allow_agent=allow_agent)
        self.key_id = key_id
        self.headers = headers
        # Precompute the Authorization header template; the signature is
        # interpolated per-request in __call__.
        self.signature_string_head = self.build_header_content()

    def build_header_content(self):
        params = {'keyId': self.key_id,
                  'algorithm': self.signer.algorithm}
        if self.headers:
            params['headers'] = ' '.join(self.headers)
        joined = ','.join(
            '{0[0]}="{0[1]}"'.format(item) for item in params.items())
        return 'Signature {0} %s'.format(joined)

    def __call__(self, r):
        # Ensure a Date header exists, since it is the default signed field.
        # NOTE(review): datetime.now() is naive local time — confirm the
        # server accepts non-UTC dates.
        if 'Date' not in r.headers:
            stamp = mktime(datetime.now().timetuple())
            r.headers['Date'] = format_date_time(stamp)
        if self.headers:
            signable = '\n'.join(r.headers[name] for name in self.headers)
        else:
            signable = r.headers['Date']
        r.headers['Authorization'] = (
            self.signature_string_head % self.signer.sign(signable))
        return r
| {
"content_hash": "85614a7afb0bc4aa669ce47e376b9fa2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 111,
"avg_line_length": 39.97959183673469,
"alnum_prop": 0.6309341500765697,
"repo_name": "atl/py-http-signature",
"id": "0e64af88ad2fbe53d6ddfa8702ea6398e397eb1f",
"size": "1959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "http_signature/requests_auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41931"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import helper
import os
import unittest
import ops
class WorkspaceTestCase(unittest.TestCase):
    """Tests for the ops.workspace temporary-directory context manager."""

    def test_with(self):
        prefix = 'prefix-'
        suffix = '-suffix'
        saved_path = None
        with ops.workspace(suffix=suffix, prefix=prefix) as w:
            saved_path = w.path
            base = os.path.basename(w.path)
            # The workspace exists and honors prefix/suffix while active.
            self.assertTrue(os.path.isdir(saved_path))
            self.assertTrue(base.startswith(prefix))
            self.assertTrue(base.endswith(suffix))
        # ...and is removed once the context exits.
        self.assertFalse(os.path.exists(saved_path))

    def test_bad_permissions(self):
        saved_path = None
        with ops.workspace() as w:
            saved_path = w.path
            ops.mkdir(w.join('test'))
            # Cleanup must still succeed even with all permissions revoked.
            ops.chmod(w.path, 0000)
            self.assertTrue(os.path.isdir(saved_path))
        self.assertFalse(os.path.exists(saved_path))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "3ee65ccdf3ffb5b043fd8db62730b983",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 62,
"avg_line_length": 25.805555555555557,
"alnum_prop": 0.5866523143164694,
"repo_name": "silas/ops",
"id": "caa137fc09ba015c3fe182a4eba7de02d02d9e07",
"size": "929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_workspace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97220"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# One cell of the artificial-dataset test grid: daily series, MovingMedian
# trend, no cycle, AR order 12, 20 exogenous variables, no noise.
art.process_dataset(
    N=1024,
    FREQ='D',
    seed=0,
    trendtype="MovingMedian",
    cycle_length=0,
    transform="None",
    sigma=0.0,
    exog_count=20,
    ar_order=12,
)
"content_hash": "56ef3621bc42f2c94721fa68f02c27e0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 164,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.7045454545454546,
"repo_name": "antoinecarme/pyaf",
"id": "cd218c9927eece12343a4e16b3bb6fb60bfc86e8",
"size": "264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_None/trend_MovingMedian/cycle_0/ar_12/test_artificial_1024_None_MovingMedian_0_12_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""Sample script creating some baseline predictions."""
import os
import numpy as np
import data
import utils
# Output basenames (under Predictions/) for the two baseline dumps.
ALL_ZERO_PREDICTIONS_BASENAME = os.path.join('Predictions', 'all_zero')
AVG_PREDICTIONS_BASENAME = os.path.join('Predictions', 'average')
def predict_average(train_data, test_data):
    """Predict the train-set mean rating for every test instance."""
    ratings = np.array([review.rating for review in train_data])
    mean_rating = ratings.mean()
    return [mean_rating] * len(test_data)
def predict_zeros(test_data):
    """Predict 0.0 for every test instance."""
    return [0. for _ in test_data]
def main():
    """Generate and dump the all-zero and train-average baseline predictions."""
    dataset = data.load_pickled_data()
    train_data = dataset['train']
    test_data = dataset['test']
    # All-zero baseline.
    # NOTE: 'generate_unqiue_file_name' (sic) is the actual name exported by
    # the utils module.
    predictions_zero = predict_zeros(test_data)
    pred_file_name = utils.generate_unqiue_file_name(
        ALL_ZERO_PREDICTIONS_BASENAME, 'npy')
    utils.dump_npy(predictions_zero, pred_file_name)
    print 'Dumped predictions to %s' % pred_file_name
    # Train-set-average baseline.
    predictions_avg = predict_average(train_data, test_data)
    pred_file_name = utils.generate_unqiue_file_name(
        AVG_PREDICTIONS_BASENAME, 'npy')
    utils.dump_npy(predictions_avg, pred_file_name)
    print 'Dumped predictions to %s' % pred_file_name
if __name__ == '__main__':
    main()
"content_hash": "f2af8e33de9b307e46116ca1559350fb",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 71,
"avg_line_length": 26.31111111111111,
"alnum_prop": 0.7094594594594594,
"repo_name": "Gabs48/ML_competition",
"id": "d8946376d393351baddba8f9b437375e6fb9b449",
"size": "1184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create_baselines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92418"
}
],
"symlink_target": ""
} |
import morepath
from .app import App
from .model import db
def run(): # pragma: no cover
    """Bind the SQLite database, map the entities, and serve the app."""
    db.bind('sqlite', 'morepath_ponyorm.db', create_db=True)
    db.generate_mapping(create_tables=True)
    # Scan for Morepath registrations before starting the server.
    morepath.autoscan()
    morepath.run(App())
| {
"content_hash": "6083ce033f9f52be754f82a74c6fea90",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.6854838709677419,
"repo_name": "henri-hulski/morepath_ponyorm",
"id": "7c6e7126a106d41f434ff976934f4e256f830ade",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "morepath_ponyorm/run.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9817"
}
],
"symlink_target": ""
} |
"""Errors used in the urlfetch API
developers.
"""
class Error(Exception):
    """Base class for all urlfetch exceptions."""
class InvalidURLError(Error):
    """Raised when the URL given is empty or invalid.

    Only http: and https: URLs are allowed. The maximum URL length
    allowed is 2048 characters. The login/pass portion is not
    allowed. In deployed applications, only ports 80 and 443 for http
    and https respectively are allowed.
    """
class DownloadError(Error):
    """Raised when the URL could not be fetched for any reason.

    Note that this exception is only raised when we could not contact the
    server. HTTP errors (e.g., 404) are returned in the status_code field
    in the return value of Fetch, and no exception is raised.
    """
class ResponseTooLargeError(Error):
    """Raised when the response was too large and was truncated."""
    def __init__(self, response):
        # The (truncated) response that exceeded the size limit.
        self.response = response
class InvalidMethodError(Error):
    """Raised when an invalid value for 'method' is provided."""
class SSLCertificateError(Error):
    """Raised when an invalid server certificate is presented."""
| {
"content_hash": "738c4f288e55614a738db09bf1f75067",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 74,
"avg_line_length": 25.227272727272727,
"alnum_prop": 0.7279279279279279,
"repo_name": "SRabbelier/Melange",
"id": "65ef5fd8a505af1bdfdb3f8bb1df021f4089a363",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/google/appengine/api/urlfetch_errors.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
# Route table for the user/account API; each route name mirrors its view.
urlpatterns = [
    url(r'^get_schools$', views.get_schools, name='get_schools'),
    url(r'^signup$', views.signup, name='signup'),
    url(r'^login$', views.login, name='login'),
    url(r'^logout$', views.logout, name='logout'),
    url(r'^is_user_logged_in$', views.is_user_logged_in, name='is_user_logged_in'),
    url(r'^user_info$', views.user_info, name='user_info'),
    url(r'^user_interests$', views.user_interests, name='user_interests'),
    url(r'^change_password$', views.change_password, name='change_password'),
    url(r'^delete_user$', views.delete_user, name='delete_user'),
    url(r'^forgot_password$', views.forgot_password, name='forgot_password'),
    url(r'^contact$', views.contact, name='contact'),
    url(r'^service_stats$', views.service_stats, name='service_stats'),
    url(r'^check$', views.check, name='check'),
]
| {
"content_hash": "14a5bd65d0c555f848b4d4a701ec385d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 83,
"avg_line_length": 47.578947368421055,
"alnum_prop": 0.6570796460176991,
"repo_name": "dimkarakostas/unimeet",
"id": "1a2f39f8268e0aaf9db2a5f677b2f2123aee4ea2",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/unimeet/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26275"
},
{
"name": "HTML",
"bytes": "63175"
},
{
"name": "JavaScript",
"bytes": "91779"
},
{
"name": "Python",
"bytes": "36917"
},
{
"name": "Shell",
"bytes": "1425"
}
],
"symlink_target": ""
} |
import atexit
import itertools
import os
import pymongo
import requests
from tests import db_user, db_password
_mo_address = os.environ.get("MO_ADDRESS", "localhost:8889")
_mongo_start_port = int(os.environ.get("MONGO_PORT", 27017))
_free_port = itertools.count(_mongo_start_port)
DEFAULT_OPTIONS = {
'logappend': True,
'setParameter': {'enableTestCommands': 1}
}
_post_request_template = {}
if db_user and db_password:
_post_request_template = {'login': db_user, 'password': db_password}
def _proc_params(mongos=False):
    """Process parameters for a new mongod/mongos on a fresh port."""
    params = dict(DEFAULT_OPTIONS, port=next(_free_port))
    if not mongos:
        # Keep data-bearing processes small and fast for tests.
        params.update(smallfiles=True, noprealloc=True, nojournal=True)
    return params
def _mo_url(resource, *args):
    """Build a mongo-orchestration API URL for the given resource path."""
    parts = [_mo_address, resource] + list(args)
    return 'http://' + '/'.join(parts)
@atexit.register
def kill_all():
    """At interpreter exit, delete every sharded cluster, replica set, and
    server still registered with mongo-orchestration."""
    clusters = requests.get(_mo_url('sharded_clusters')).json()
    repl_sets = requests.get(_mo_url('replica_sets')).json()
    servers = requests.get(_mo_url('servers')).json()
    for cluster in clusters['sharded_clusters']:
        requests.delete(_mo_url('sharded_clusters', cluster['id']))
    for rs in repl_sets['replica_sets']:
        # Bug fix: the resource name was misspelled 'relica_sets', so the
        # DELETE hit a nonexistent endpoint and replica sets were leaked.
        requests.delete(_mo_url('replica_sets', rs['id']))
    for server in servers['servers']:
        requests.delete(_mo_url('servers', server['id']))
class MCTestObject(object):
    """Base class for resources managed via the mongo-orchestration API."""

    def get_config(self):
        """Return the JSON body used to create this resource."""
        raise NotImplementedError

    def _make_post_request(self):
        # Merge auth credentials (if any) with the resource config.
        body = dict(_post_request_template)
        body.update(self.get_config())
        response = requests.post(
            _mo_url(self._resource), timeout=None, json=body)
        return response.json()

    def client(self, **kwargs):
        """Return an (authenticated, when configured) MongoClient for self.uri."""
        conn = pymongo.MongoClient(self.uri, **kwargs)
        if db_user:
            conn.admin.authenticate(db_user, db_password)
        return conn

    def stop(self):
        """Delete this resource from mongo-orchestration."""
        requests.delete(_mo_url(self._resource, self.id))
class Server(MCTestObject):
    """A single mongod process managed by mongo-orchestration."""

    _resource = 'servers'

    def __init__(self, id=None, uri=None):
        self.id = id
        self.uri = uri

    def get_config(self):
        return {'name': 'mongod', 'procParams': _proc_params()}

    def start(self):
        """Create the server, or restart it if it already has an id."""
        if self.id is not None:
            requests.post(
                _mo_url('servers', self.id), timeout=None,
                json={'action': 'start'}
            )
        else:
            response = self._make_post_request()
            self.id = response['id']
            self.uri = response.get('mongodb_auth_uri', response['mongodb_uri'])
        return self

    def stop(self, destroy=True):
        """Stop the server; destroy=True also removes it entirely."""
        if not destroy:
            requests.post(_mo_url('servers', self.id), timeout=None,
                          json={'action': 'stop'})
        else:
            super(Server, self).stop()
class ReplicaSet(MCTestObject):
    """A three-member replica set: two data members plus an arbiter."""

    _resource = 'replica_sets'

    def __init__(self, id=None, uri=None, primary=None, secondary=None):
        self.id = id
        self.uri = uri
        self.primary = primary
        self.secondary = secondary

    def get_config(self):
        # Each member needs its own _proc_params() call for a distinct port.
        members = [{'procParams': _proc_params()} for _ in range(2)]
        members.append({'rsParams': {'arbiterOnly': True},
                        'procParams': _proc_params()})
        return {'members': members}

    def _init_from_response(self, response):
        self.id = response['id']
        self.uri = response.get('mongodb_auth_uri', response['mongodb_uri'])
        for member in response['members']:
            state = member['state']
            # Member state 1 is PRIMARY, 2 is SECONDARY.
            if state == 1:
                self.primary = Server(member['server_id'], member['host'])
            elif state == 2:
                self.secondary = Server(member['server_id'], member['host'])
        return self

    def start(self):
        # We never need to restart a replica set, only start new ones.
        return self._init_from_response(self._make_post_request())
class ShardedCluster(MCTestObject):
    """A two-shard cluster where each shard is a replica set."""

    _resource = 'sharded_clusters'

    def __init__(self):
        self.id = None
        self.uri = None
        self.shards = []

    def get_config(self):
        return {
            'shards': [
                {'id': 'demo-set-0', 'shardParams': ReplicaSet().get_config()},
                {'id': 'demo-set-1', 'shardParams': ReplicaSet().get_config()}
            ],
            'routers': [_proc_params(mongos=True)],
            'configsvrs': [_proc_params()]
        }

    def start(self):
        # We never need to restart a sharded cluster, only start new ones.
        response = self._make_post_request()
        # Map our shard names to the replica-set ids assigned by the server.
        shard_ids = dict(
            (shard['id'], shard['_id']) for shard in response['shards'])
        shard1 = requests.get(
            _mo_url('replica_sets', shard_ids['demo-set-0'])).json()
        shard2 = requests.get(
            _mo_url('replica_sets', shard_ids['demo-set-1'])).json()
        self.id = response['id']
        self.uri = response.get('mongodb_auth_uri', response['mongodb_uri'])
        self.shards = [ReplicaSet()._init_from_response(resp)
                       for resp in (shard1, shard2)]
        return self
"content_hash": "6554946f011c7c1df74efb258c5cce2a",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 80,
"avg_line_length": 30.302325581395348,
"alnum_prop": 0.568495778971604,
"repo_name": "neo4j-contrib/neo4j_doc_manager",
"id": "f15ed7f8304ba92fa27e14f18815e012ea3bd0e3",
"size": "5786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/setup_cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "64898"
}
],
"symlink_target": ""
} |
import mimetypes
import cherrypy
from girder.api import access
from girder.api.describe import autoDescribeRoute, describeRoute, Description
from girder.api.rest import loadmodel, RestException
from girder.constants import AccessType, SortDir, TokenScope
from girder.exceptions import AccessException, GirderException, ValidationException
from girder.models.file import File
from girder.utility import RequestBodyStream
from ..models.dataset import Dataset
from ..models.image import Image
from .base import IsicResource # noqa
from ..models.user import User
# MIME types accepted for CSV metadata uploads ('application/vnd.ms-excel'
# is what some browsers/OSes report for .csv files).
CSV_FORMATS = [
    'text/csv',
    'application/vnd.ms-excel'
]

# MIME types accepted for ZIP uploads, covering common browser/OS variants.
ZIP_FORMATS = [
    'multipart/x-zip',
    'application/zip',
    'application/zip-compressed',
    'application/x-zip-compressed',
]
class DatasetResource(IsicResource):
    """REST resource for lesion image datasets: listing, access control,
    image/ZIP ingestion, QC review, and metadata registration/application."""

    def __init__(self):
        super(DatasetResource, self).__init__()
        self.resourceName = 'dataset'

        # Dataset listing, detail, and access control.
        self.route('GET', (), self.find)
        self.route('GET', (':id',), self.getDataset)
        self.route('GET', (':id', 'access'), self.getDatasetAccess)
        self.route('PUT', (':id', 'access'), self.setDatasetAccess)
        self.route('POST', (), self.createDataset)
        # Image ingestion: single images in the request body, or ZIPs via S3.
        self.route('POST', (':id', 'image'), self.addImage)
        self.route('POST', (':id', 'zip'), self.initiateZipUploadToS3)
        self.route('DELETE', (':id', 'zip', ':batchId'), self.cancelZipUploadToS3)
        self.route('POST', (':id', 'zip', ':batchId'), self.finalizeZipUploadToS3)
        # QC review workflow.
        self.route('GET', (':id', 'review'), self.getReviewImages)
        self.route('POST', (':id', 'review'), self.submitReviewImages)
        # Metadata CSV registration, download, and application.
        self.route('GET', (':id', 'metadata'), self.getRegisteredMetadata)
        self.route('POST', (':id', 'metadata'), self.registerMetadata)
        self.route('DELETE', (':id', 'metadata', ':metadataFileId'), self.removeMetadata)
        self.route('GET', (':id', 'metadata', ':metadataFileId', 'download'), self.downloadMetadata)
        self.route('POST', (':id', 'metadata', ':metadataFileId', 'apply'), self.applyMetadata)
@describeRoute(
Description('Return a list of lesion image datasets.')
.pagingParams(defaultSort='name')
.param('detail', 'Display the full information for each image, instead of a summary.',
required=False, dataType='boolean', default=False)
.errorResponse()
)
@access.public(cookie=True)
def find(self, params):
user = self.getCurrentUser()
detail = self.boolParam('detail', params, default=False)
limit, offset, sort = self.getPagingParameters(params, 'name')
filterFunc = Dataset().filter if detail else Dataset().filterSummary
return [
filterFunc(dataset, user)
for dataset in
Dataset().list(user=user, limit=limit, offset=offset, sort=sort)
]
@describeRoute(
Description("Return a lesion image dataset's details.")
.param('id', 'The ID of the dataset.', paramType='path')
.errorResponse('ID was invalid.')
)
@access.public(cookie=True)
@loadmodel(model='dataset', plugin='isic_archive', level=AccessType.READ)
def getDataset(self, dataset, params):
user = self.getCurrentUser()
return Dataset().filter(dataset, user)
    @describeRoute(
        Description('Get the access control list for a dataset.')
        .param('id', 'The ID of the dataset.', paramType='path')
        .errorResponse('ID was invalid.')
    )
    @access.user
    @loadmodel(model='dataset', plugin='isic_archive', level=AccessType.ADMIN)
    def getDatasetAccess(self, dataset, params):
        """Return the dataset's full ACL together with its public flag."""
        return {
            'access': Dataset().getFullAccessList(dataset),
            'public': dataset['public']
        }
    @autoDescribeRoute(
        Description('Set the access control list for a dataset.')
        .modelParam('id', description='The ID of the dataset.', paramType='path',
                    model='dataset', plugin='isic_archive', level=AccessType.ADMIN)
        .jsonParam('access', 'The JSON-encoded access control list.', paramType='form',
                   requireObject=True)
        .param('public', 'Whether the dataset should be publicly visible.', paramType='form',
               dataType='boolean')
        .errorResponse('ID was invalid.')
    )
    @access.user
    def setDatasetAccess(self, dataset, access, public, params):
        """Replace the dataset's ACL and public visibility with the given values."""
        # Since this is often submitted as a URLEncoded form by upstream Girder's client widget,
        # the integer in the 'access' field is not decoded correctly by 'self._decodeParams'; so,
        # use autoDescribeRoute to decode fields
        Dataset().setAccessList(dataset, access)
        Dataset().setPublic(dataset, public)
@describeRoute(
Description('Create a lesion image dataset.')
.param('name', 'Name of the dataset.', paramType='form')
.param('description', 'Description of the dataset.', paramType='form')
.param('license', 'License of the dataset.', paramType='form')
.param('attribution', 'Attribution of the dataset.', paramType='form')
.param('owner', 'Owner of the dataset.', paramType='form')
)
@access.user
def createDataset(self, params):
params = self._decodeParams(params)
self.requireParams(['name', 'description', 'license', 'attribution', 'owner'], params)
user = self.getCurrentUser()
User().requireCreateDataset(user)
dataset = Dataset().createDataset(
name=params['name'],
description=params['description'],
license=params['license'],
attribution=params['attribution'],
owner=params['owner'],
creatorUser=user
)
return Dataset().filter(dataset, user)
    @describeRoute(
        Description('Upload an image to a dataset.')
        .notes('Send the image data in the request body, as shown in the examples below, '
               'and the parameters in the query string. '
               'Note that the examples ignore authentication and error handling.\n\n'
               'In the examples, `file` is a '
               '[File](https://developer.mozilla.org/en-US/docs/Web/API/File) object, '
               'for example from an [<input type="file">]'
               '(https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file) '
               "element or a drag and drop operation's [DataTransfer]"
               '(https://developer.mozilla.org/en-US/docs/Web/API/DataTransfer) object.\n\n'
               'Example using [XMLHttpRequest]'
               '(https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest):\n'
               '```\n'
               'var req = new XMLHttpRequest();\n'
               "req.open('POST', url, true); // url includes parameters\n"
               'req.onload = function (event) {\n'
               '    // Uploaded\n'
               '};\n'
               "req.setRequestHeader('Content-Type', 'image/jpeg');\n"
               'req.send(file);\n'
               '```\n\n'
               'Example using [jQuery.ajax()](http://api.jquery.com/jquery.ajax/):\n'
               '```\n'
               '$.ajax({\n'
               '    url: url, // url includes parameters\n'
               "    method: 'POST',\n"
               '    data: file,\n'
               "    contentType: 'image/jpeg',\n"
               '    processData: false,\n'
               '}).done(function (resp) {\n'
               '    // Uploaded\n'
               '});\n'
               '```\n\n'
               'Example using [axios](https://github.com/axios/axios):\n'
               '```\n'
               'axios({\n'
               "    method: 'post',\n"
               '    url: url,\n'
               '    params: {\n'
               "        filename: 'my_image.jpg',\n"
               "        signature: 'my signature',\n"
               '    },\n'
               '    data: file,\n'
               '    headers: {\n'
               "        'Content-Type': 'image/jpeg',\n"
               '    }\n'
               '}).then(function (resp) {\n'
               '    // Uploaded\n'
               '});\n'
               '```\n\n'
               'Note that files uploaded in the request body are not supported by '
               '[OpenAPI 2.0](https://swagger.io/docs/specification/2-0/file-upload/), '
               "so it's currently not possible to use this endpoint from the Swagger UI "
               'interface.')
        # Note: OpenAPI 3.0 supports files uploaded in the request body, but Swagger GUI may not
        # properly display the file upload UI. See:
        # - https://swagger.io/docs/specification/describing-request-body/file-upload/
        # - https://github.com/swagger-api/swagger-ui/issues/3641
        .param('id', 'The ID of the dataset.', paramType='path')
        .param('filename', 'Image filename.', paramType='query')
        .param('signature', 'Signature of license agreement.', paramType='query')
    )
    @access.user
    @loadmodel(model='dataset', plugin='isic_archive', level=AccessType.WRITE)
    def addImage(self, dataset, params):
        """Add a single image, sent raw in the request body, to the dataset.

        Validates the filename and license signature, stores the image, and
        queues asynchronous ingestion. Returns the serialized image.
        """
        params = self._decodeParams(params)
        self.requireParams(['filename', 'signature'], params)

        user = self.getCurrentUser()
        User().requireCreateDataset(user)

        filename = params['filename'].strip()
        if not filename:
            raise ValidationException('Filename must be specified.', 'filename')

        signature = params['signature'].strip()
        if not signature:
            raise ValidationException('Signature must be specified.', 'signature')

        # The raw image bytes arrive in the request body, not as a form upload.
        imageDataStream = RequestBodyStream(cherrypy.request.body)
        imageDataSize = len(imageDataStream)

        if not imageDataSize:
            raise RestException('No data provided in request body.')

        image = Dataset().addImage(
            dataset=dataset,
            imageDataStream=imageDataStream,
            imageDataSize=imageDataSize,
            filename=filename,
            signature=signature,
            user=user)

        # avoid circular imports from models.__init__
        from isic_archive.tasks import ingestImage
        # Ingestion runs out-of-process via Celery.
        ingestImage.delay(image['_id'])

        return Image().filter(image, user=user)
    @describeRoute(
        Description('Initiate a direct-to-S3 upload of a ZIP file of images.')
        .notes('This endpoint returns information that allows the client to upload a '
               'ZIP file of images directly to an Amazon Web Services (AWS) S3 bucket.'
               '\n\n'
               "It's recommended that the client use an AWS SDK, such as "
               '[Boto 3](https://github.com/boto/boto3) or '
               '[AWS SDK for JavaScript](https://github.com/aws/aws-sdk-js), '
               'to simplify authenticating and uploading the file.'
               '\n\n'
               'More specifically, this endpoint returns a JSON response that includes:\n'
               '- Temporary security credentials to authenticate with AWS:\n'
               '  - `accessKeyId`\n'
               '  - `secretAccessKey`\n'
               '  - `sessionToken`\n'
               '- An S3 bucket name and object key in which to upload the file:\n'
               '  - `bucketName`\n'
               '  - `objectKey`\n'
               '- A batch identifier for subsequent API calls:\n'
               '  - `batchId`\n'
               '\n\n'
               'After calling this endpoint, the client should use this information to upload '
               'the ZIP file directly to S3, as shown in the examples below.'
               '\n\n'
               '#### Example using Boto 3\n'
               '```\n'
               'import boto3\n'
               's3 = boto3.client(\n'
               "    's3',\n"
               "    aws_access_key_id=response['accessKeyId'],\n"
               "    aws_secret_access_key=response['secretAccessKey'],\n"
               "    aws_session_token=response['sessionToken']\n"
               ')\n'
               '\n'
               "with open('images.zip', 'rb') as data:\n"
               '    s3.upload_fileobj(\n'
               '        Fileobj=data,\n'
               "        Bucket=response['bucketName'],\n"
               "        Key=response['objectKey']\n"
               '    )\n'
               '\n'
               '# Store batch identifier\n'
               "batchId = response['batchId']\n"
               '```\n\n'
               '#### Example using AWS SDK for JavaScript\n'
               '```\n'
               'AWS.config.update({\n'
               '    accessKeyId: response.accessKeyId,\n'
               '    secretAccessKey: response.secretAccessKey,\n'
               '    sessionToken: response.sessionToken\n'
               '});\n'
               '\n'
               '// Store batch identifier\n'
               'var batchId = response.batchId;\n'
               '\n'
               'var s3 = new AWS.S3({\n'
               "    apiVersion: '2006-03-01'\n"
               '});\n'
               '\n'
               'var params = {\n'
               '    Bucket: response.bucketName,\n'
               '    Key: response.objectKey,\n'
               '    Body: data\n'
               '};\n'
               's3.upload(params, function (err, data) {\n'
               '    if (err) {\n'
               '        console.log(\"Error\", err);\n'
               '    } else {\n'
               '        // Uploaded\n'
               '    }\n'
               '});\n'
               '```\n\n'
               '#### Finalizing the upload\n'
               '\n\n'
               'To finalize the upload, the client should call '
               '`POST /dataset/{id}/zip/{batchId}`.'
               '\n\n'
               'To cancel the upload, the client should call '
               '`DELETE /dataset/{id}/zip/{batchId}`.'
               )
        .param('id', 'The ID of the dataset.', paramType='path')
        .param('signature', 'Signature of license agreement.', paramType='form')
        .param('filename', 'The filename of the ZIP upload.', paramType='form')
    )
    @access.user
    @loadmodel(model='dataset', plugin='isic_archive', level=AccessType.WRITE)
    def initiateZipUploadToS3(self, dataset, params):
        """Create an upload batch and return temporary S3 credentials and the
        bucket/key the client should upload the ZIP to."""
        params = self._decodeParams(params)
        self.requireParams(['signature'], params)

        user = self.getCurrentUser()
        User().requireCreateDataset(user)

        # Filename is optional for ZIP uploads.
        filename = params.get('filename', None)

        signature = params['signature'].strip()
        if not signature:
            raise ValidationException('Signature must be specified.', 'signature')

        try:
            return Dataset().initiateZipUploadS3(dataset=dataset, signature=signature, user=user,
                                                 filename=filename)
        except GirderException as e:
            # Surface backend failures as client-visible REST errors.
            raise RestException(str(e))
@describeRoute(
Description('Cancel a direct-to-S3 upload of a ZIP file of images.')
.notes('Call this to cancel a direct-to-S3 upload instead of finalizing it.')
.param('id', 'The ID of the dataset.', paramType='path')
.param('batchId', 'The ID of the batch.', paramType='path')
)
@access.user
@loadmodel(map={'batchId': 'batch'}, model='batch', plugin='isic_archive')
@loadmodel(model='dataset', plugin='isic_archive', level=AccessType.WRITE)
def cancelZipUploadToS3(self, dataset, batch, params):
user = self.getCurrentUser()
User().requireCreateDataset(user)
try:
Dataset().cancelZipUploadS3(dataset=dataset, batch=batch, user=user)
except GirderException as e:
raise RestException(str(e))
    @describeRoute(
        Description('Finalize a direct-to-S3 upload of a ZIP file of images.')
        .notes('Call this after uploading the ZIP file of images to S3. '
               'The images in the ZIP file will be added to the dataset.')
        .param('id', 'The ID of the dataset.', paramType='path')
        .param('batchId', 'The ID of the batch.', paramType='path')
    )
    @access.user
    @loadmodel(map={'batchId': 'batch'}, model='batch', plugin='isic_archive')
    @loadmodel(model='dataset', plugin='isic_archive', level=AccessType.WRITE)
    def finalizeZipUploadToS3(self, dataset, batch, params):
        """Mark a direct-to-S3 ZIP upload complete so its images are ingested."""
        user = self.getCurrentUser()
        # Note: we don't require the finalizer of the upload to be the creator of the batch
        User().requireCreateDataset(user)

        Dataset().finalizeZipUploadS3(batch)

        # 201 Created: the upload was accepted.
        cherrypy.response.status = 201
    @describeRoute(
        Description('Get a list of images in this dataset to QC Review.')
        .param('id', 'The ID of the dataset.', paramType='path')
        .param('limit', 'Result set size limit.', default=50, required=False, dataType='int')
        .errorResponse('ID was invalid.')
    )
    @access.user
    @loadmodel(model='dataset', plugin='isic_archive', level=AccessType.WRITE)
    def getReviewImages(self, dataset, params):
        """Return summaries of Pre-review images awaiting QC, sorted by
        diagnosis and then name."""
        user = self.getCurrentUser()
        User().requireReviewDataset(user)

        prereviewFolder = Dataset().prereviewFolder(dataset)
        if not prereviewFolder:
            raise AccessException('There are no pending Pre-review images for this dataset.')

        limit = int(params.get('limit', 50))

        # Expose only the fields the review UI needs from each image.
        output = [
            {
                field: image[field]
                for field in
                ['_id', 'name', 'updated', 'description', 'meta']
            }
            for image in
            Image().find(
                {'folderId': prereviewFolder['_id']},
                limit=limit,
                sort=[
                    ('meta.clinical.diagnosis', SortDir.ASCENDING),
                    ('name', SortDir.ASCENDING)
                ]
            )
        ]
        return output
@describeRoute(
Description('Do a QC Review of images within a dataset.')
.param('id', 'The ID of the dataset.', paramType='path')
.param('accepted', 'The IDs of accepted images, as a JSON array.', paramType='form')
.param('flagged', 'The IDs of flagged images, as a JSON array.', paramType='form')
.errorResponse('ID was invalid.')
)
@access.user
@loadmodel(model='dataset', plugin='isic_archive', level=AccessType.WRITE)
def submitReviewImages(self, dataset, params):
user = self.getCurrentUser()
User().requireReviewDataset(user)
params = self._decodeParams(params)
self.requireParams(['accepted', 'flagged'], params)
# TODO: validate that parameters are lists of strings
acceptedImages = [
Image().load(imageId, user=user, level=AccessType.READ, exc=True)
for imageId in params['accepted']
]
flaggedImages = [
Image().load(imageId, user=user, level=AccessType.READ, exc=True)
for imageId in params['flagged']
]
Dataset().reviewImages(dataset, acceptedImages, flaggedImages, user)
# TODO: return value?
return {'status': 'success'}
    @describeRoute(
        Description('Get registered metadata for a dataset.')
        .param('id', 'The ID of the dataset.', paramType='path')
    )
    @access.user
    @loadmodel(model='dataset', plugin='isic_archive', level=AccessType.WRITE)
    def getRegisteredMetadata(self, dataset, params):
        """List the metadata files registered with this dataset, including the
        registering user and registration time for each."""
        user = self.getCurrentUser()
        User().requireReviewDataset(user)

        output = []
        for registration in dataset['metadataFiles']:
            # TODO: "File().load" can use the "fields" argument and be expressed
            # as a comprehension, once the fix from upstream Girder is available
            metadataFile = File().load(registration['fileId'], force=True, exc=True)
            output.append({
                'file': {
                    '_id': metadataFile['_id'],
                    'name': metadataFile['name']
                },
                'user': User().filterSummary(
                    User().load(registration['userId'], force=True, exc=True),
                    user),
                'time': registration['time']
            })
        return output
@describeRoute(
Description('Register metadata with a dataset.')
.notes('Send the CSV metadata data in the request body with '
'the `Content-Type` header set to `text/csv`.')
.param('id', 'The ID of the dataset.', paramType='path')
.param('filename', 'The metadata filename.', paramType='query')
)
@access.user
@loadmodel(model='dataset', plugin='isic_archive', level=AccessType.WRITE)
def registerMetadata(self, dataset, params):
params = self._decodeParams(params)
self.requireParams(['filename'], params)
user = self.getCurrentUser()
filename = params['filename'].strip()
if not filename:
raise ValidationException('Filename must be specified.', 'filename')
metadataDataStream = RequestBodyStream(cherrypy.request.body)
if not len(metadataDataStream):
raise RestException('No data provided in request body.')
Dataset().registerMetadata(
dataset=dataset, user=user, metadataDataStream=metadataDataStream, filename=filename,
sendMail=True)
# TODO: return value?
return {'status': 'success'}
def _checkFileFormat(self, file, formats):
"""
Check whether a file is of an expected format.
:param file: The file document.
:param formats: A list of valid formats.
:return: True if the file is of an expected format.
"""
if file['mimeType'] in formats:
return True
if file['mimeType'] in ['application/octet-stream', None] and \
mimetypes.guess_type(file['name'], strict=False)[0] in formats:
return True
return False
    @describeRoute(
        Description('Delete metadata registered with a dataset.')
        .param('id', 'The ID of the dataset.', paramType='path')
        .param('metadataFileId', 'The ID of the .csv metadata file.', paramType='path')
    )
    @access.admin
    # File is attached to dataset, so access level refers to permission on dataset
    @loadmodel(model='file', map={'metadataFileId': 'metadataFile'}, level=AccessType.SITE_ADMIN)
    @loadmodel(model='dataset', plugin='isic_archive', level=AccessType.SITE_ADMIN)
    def removeMetadata(self, dataset, metadataFile, params):
        """Unregister and remove a registered metadata file from the dataset."""
        self._requireMetadataFile(dataset, metadataFile)
        Dataset().removeMetadata(dataset=dataset, metadataFile=metadataFile)

        # No Content
        cherrypy.response.status = 204
    @describeRoute(
        Description('Download metadata registered with dataset.')
        .param('id', 'The ID of the dataset.', paramType='path')
        .param('metadataFileId', 'The ID of the .csv metadata file.', paramType='path')
    )
    @access.public(scope=TokenScope.DATA_READ, cookie=True)
    # File is attached to dataset, so access level refers to permission on dataset
    @loadmodel(model='file', map={'metadataFileId': 'metadataFile'}, level=AccessType.WRITE)
    @loadmodel(model='dataset', plugin='isic_archive', level=AccessType.WRITE)
    def downloadMetadata(self, dataset, metadataFile, params):
        """Stream a registered metadata file back to the client."""
        # NOTE(review): the route is @access.public, yet requireReviewDataset is
        # called on the (possibly anonymous) current user — confirm this
        # combination is intended.
        user = self.getCurrentUser()
        User().requireReviewDataset(user)

        self._requireMetadataFile(dataset, metadataFile)
        return File().download(metadataFile)
    @describeRoute(
        Description('Apply registered metadata to a dataset.')
        .param('id', 'The ID of the dataset.', paramType='path')
        .param('metadataFileId', 'The ID of the .csv metadata file.', paramType='path')
    )
    @access.user
    # File is attached to dataset, so access level refers to permission on dataset
    @loadmodel(model='file', map={'metadataFileId': 'metadataFile'}, level=AccessType.ADMIN)
    @loadmodel(model='dataset', plugin='isic_archive', level=AccessType.ADMIN)
    def applyMetadata(self, dataset, metadataFile, params):
        """Queue asynchronous application of a registered metadata file to the
        dataset's images."""
        params = self._decodeParams(params)
        # Deferred import; the work itself runs out-of-process via Celery.
        from isic_archive.tasks import applyMetadata as applyMetadata_
        applyMetadata_.delay(dataset['_id'], metadataFile['_id'], self.getCurrentUser()['_id'])
def _requireMetadataFile(self, dataset, metadataFile):
"""Raise a ValidationException if the metadata file is not registered with the dataset."""
if metadataFile is None or not any(registration['fileId'] == metadataFile['_id']
for registration in dataset['metadataFiles']):
raise ValidationException('Metadata file ID is not registered.', 'metadataFileId')
| {
"content_hash": "7918c17ccf6bc99263f4a3d97863900f",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 100,
"avg_line_length": 43.35489510489511,
"alnum_prop": 0.5804669543126739,
"repo_name": "ImageMarkup/isic-archive",
"id": "106f0922aa0ea42578fb75bf75ae151a2866affb",
"size": "24799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isic_archive/api/dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8936"
},
{
"name": "Dockerfile",
"bytes": "838"
},
{
"name": "HTML",
"bytes": "56481"
},
{
"name": "JavaScript",
"bytes": "3778033"
},
{
"name": "Jinja",
"bytes": "6417"
},
{
"name": "Mako",
"bytes": "76622"
},
{
"name": "PEG.js",
"bytes": "2182"
},
{
"name": "Pug",
"bytes": "51086"
},
{
"name": "Python",
"bytes": "381336"
},
{
"name": "Shell",
"bytes": "30"
},
{
"name": "Stylus",
"bytes": "18670"
},
{
"name": "TeX",
"bytes": "50168"
},
{
"name": "Vue",
"bytes": "70286"
}
],
"symlink_target": ""
} |
class Solution(object):
    def deleteDuplicates(self, head):
        """
        :type head: ListNode
        :rtype: ListNode

        Recursively remove every value that appears more than once in a
        sorted linked list (duplicates are dropped entirely, not collapsed
        to a single occurrence).
        """
        if not head: return None
        # if the given head and head.next are duplicated nodes
        if head.next and head.val == head.next.val:
            # skip all the duplicated elements and calculate
            while head.next and head.val == head.next.val:
                head = head.next
            # after the loop, head is the LAST node of the duplicate run
            # (e.g. [2,2,3,4]: head is the second 2), so the whole run is
            # discarded by recursing on head.next and returning that result
            head = self.deleteDuplicates(head.next)
        else:
            # we know head.val != head.next.val
            # however, we don't know the relationship between head.next.val and head.next.next.....val
            # so, we continue to calculate the rest of list that starts with head.next
            head.next = self.deleteDuplicates(head.next)
        return head | {
"content_hash": "d97b24eb10a75873ad8d8bce6ff98bb1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 93,
"avg_line_length": 35.583333333333336,
"alnum_prop": 0.6967213114754098,
"repo_name": "comicxmz001/LeetCode",
"id": "d54266a7d7ff5c7af5a1d3cc21702d82e1308cb5",
"size": "1016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/82. Remove Duplicates from Sorted List II.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "437"
},
{
"name": "Python",
"bytes": "126033"
}
],
"symlink_target": ""
} |
import argparse
import sys, os

# Command-line driver for echo cross-resonance (CR) calibration sequences.
parser = argparse.ArgumentParser()
parser.add_argument('pyqlabpath', help='path to PyQLab directory')
parser.add_argument('control', help='control qubit name')
parser.add_argument('target', help='target qubit name')
parser.add_argument('caltype', type=float, help='1 for length, 2 for phase')
parser.add_argument('length', type=float, help='step for length calibration or fixed length in phase calibration (ns)')
args = parser.parse_args()

# Load the PyQLab environment; names used below (QubitFactory, EchoCRLen,
# EchoCRPhase, np) are presumably defined by startup.py — Python 2 only
# (execfile).
sys.path.append(args.pyqlabpath)
execfile(os.path.join(args.pyqlabpath, 'startup.py'))

q2 = QubitFactory(args.control)
q1 = QubitFactory(args.target)
# caltype 1 sweeps the CR pulse length; any other value sweeps the CR phase
# at a fixed length (lengths supplied in ns, converted to seconds here).
if args.caltype==1:
    EchoCRLen(q2,q1,args.length*1e-9*np.arange(2,21),riseFall=20e-9,showPlot=False)
else:
    EchoCRPhase(q2,q1,np.linspace(0,2*np.pi,19),length=args.length*1e-9, riseFall=20e-9, showPlot=False) | {
"content_hash": "baecc36a2ab82f35f657a07c12cd5b97",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 119,
"avg_line_length": 41.6,
"alnum_prop": 0.7608173076923077,
"repo_name": "Plourde-Research-Lab/Qlab",
"id": "33ab233cdc03b9c1eda527251b418ed3a4ceae0c",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/muWaveDetection/CRCal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "15999"
},
{
"name": "C",
"bytes": "36511"
},
{
"name": "M",
"bytes": "31108"
},
{
"name": "Matlab",
"bytes": "1628579"
},
{
"name": "Processing",
"bytes": "49601"
},
{
"name": "Python",
"bytes": "54760"
}
],
"symlink_target": ""
} |
import inspect
from django import forms
from django.utils.safestring import mark_safe
from . import widgets as wizard_builder_widgets
def get_field_options():
    '''
    Build (name, name) option tuples from the field generating functions
    defined on QuestionField

    Formatted to be consumed by Question.type.choices
    '''
    members = inspect.getmembers(QuestionField, predicate=inspect.ismethod)
    return [(name, name) for name, _ in members]
class ConditionalFieldMixin:
    """Form-field mixin that forwards per-choice extra data to the widget."""

    def __init__(self, *args, choice_datas, **kwargs):
        # `choice_datas` is a required keyword-only argument; it is exposed on
        # the widget for rendering. Presumably it parallels the field's
        # `choices` — TODO confirm against the widget implementations.
        super().__init__(*args, **kwargs)
        self.widget.choice_datas = choice_datas
# Single-select choice field whose widget receives per-choice data.
class ConditionalChoiceField(
    ConditionalFieldMixin,
    forms.ChoiceField,
):
    pass
# Multi-select choice field whose widget receives per-choice data.
class ConditionalMultipleChoiceField(
    ConditionalFieldMixin,
    forms.MultipleChoiceField,
):
    pass
class QuestionField(object):
    '''
    Each classmethod on this class builds the form field for one type of
    wizard question

    The method names double as the valid values of Question.type, so whenever
    a field generating function is added or removed the migrations must be
    updated to match
    '''

    @classmethod
    def singlelinetext(cls, question):
        kwargs = {
            'required': False,
            'label': mark_safe(question.text),
            'help_text': mark_safe(question.descriptive_text),
        }
        return forms.CharField(**kwargs)

    @classmethod
    def textarea(cls, question):
        kwargs = {
            'required': False,
            'label': mark_safe(question.text),
            'help_text': mark_safe(question.descriptive_text),
            'widget': forms.Textarea,
        }
        return forms.CharField(**kwargs)

    @classmethod
    def checkbox(cls, question):
        kwargs = {
            'required': False,
            'label': mark_safe(question.text),
            'help_text': mark_safe(question.descriptive_text),
            'widget': wizard_builder_widgets.CheckboxConditionalSelectMultiple,
            'choices': question.choices_pk_text_array,
            'choice_datas': question.choices_data_array,
        }
        return ConditionalMultipleChoiceField(**kwargs)

    @classmethod
    def radiobutton(cls, question):
        kwargs = {
            'required': False,
            'label': mark_safe(question.text),
            'help_text': mark_safe(question.descriptive_text),
            'widget': wizard_builder_widgets.RadioConditionalSelect,
            'choices': question.choices_pk_text_array,
            'choice_datas': question.choices_data_array,
        }
        return ConditionalChoiceField(**kwargs)

    @classmethod
    def dropdown(cls, question):
        kwargs = {
            'required': False,
            'label': mark_safe(question.text),
            'help_text': mark_safe(question.descriptive_text),
            'widget': wizard_builder_widgets.ConditionalSelect,
            'choices': question.choices_pk_text_array,
            'choice_datas': question.choices_data_array,
        }
        return ConditionalChoiceField(**kwargs)
| {
"content_hash": "5524402f50d11213394576673d140f0e",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 76,
"avg_line_length": 28.339805825242717,
"alnum_prop": 0.6505652620760535,
"repo_name": "SexualHealthInnovations/django-wizard-builder",
"id": "b443ab513ae49c93aa26e276f471656f14af782b",
"size": "2919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wizard_builder/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6885"
},
{
"name": "Makefile",
"bytes": "2525"
},
{
"name": "Python",
"bytes": "99398"
}
],
"symlink_target": ""
} |
"""Configures and starts up the Dinky Service.
"""
import logging
from logging.handlers import SysLogHandler
import os.path
import socket
import tornado.ioloop
import tornado.httpserver
from tornado.options import define, options
from tornado.util import exec_in
from tornado.escape import native_str
from tornado.web import RedirectHandler
from dinky.controllers import roothandler, hellohandler, bgndhandler
# Directory containing the config files (default.conf / local.conf),
# resolved relative to this module.
CONF_DIR = os.path.join(os.path.dirname(__file__), '../config')
def configure_syslog():
    """
    Configure the syslog logging channel.

    Logging to syslog is enabled by setting `syslog_host` in the config
    file; the port (default 514) can be overridden via `syslog_port`.
    """
    syslog_host = getattr(options, 'syslog_host', None)
    if not syslog_host:
        return

    ip = socket.gethostbyname(socket.gethostname())
    handler = SysLogHandler(address=(syslog_host, options.syslog_port))
    handler.setFormatter(
        logging.Formatter(ip + ' ' + options.name + ' %(message)s'))
    logging.getLogger().addHandler(handler)
def define_options():
    """
    Define the options from default.conf dynamically.

    Executes config/default.conf and, for each name it sets, either
    overrides a tornado-predefined option's value or defines a new option.
    """
    default = {}
    with open(os.path.join(CONF_DIR, 'default.conf'), 'rb') as f:
        exec_in(native_str(f.read()), {}, default)
    # dict.items() iterates identically on Python 2 and 3
    # (the original .iteritems() is Python 2 only)
    for name, value in default.items():
        # if the option is already defined by tornado
        # override the value
        # a list of options set by tornado:
        # log_file_num_backups, logging, help,
        # log_to_stderr, log_file_max_size, log_file_prefix
        if name in options:
            setattr(options, name, value)
        # otherwise define the option
        else:
            define(name, value)
def load_config():
    """
    Use default.conf as the definition of options with default values
    using tornado.options.define.
    Then overrides the values from local.conf, if that file exists.
    This mapping allows access to the application configuration across the
    application via `tornado.options.options`.

    NOTE:
        logging in load_config() is not going to work because logging is
        configured only when tornado.options.parse_command_line(final=True)
    """
    define_options()
    local_conf = os.path.join(CONF_DIR, 'local.conf')
    # local.conf is optional; final=False defers logging configuration
    # until parse_command_line(final=True) runs in main().
    if os.path.isfile(local_conf):
        options.parse_config_file(local_conf, final=False)
def log_config():
    """Log the settings the application was started with."""
    logging.info('Service will be started with such settings:')
    # Snapshot the options once; the original rebuilt the dict via
    # options.as_dict() on every loop iteration.
    settings = options.as_dict()
    for name, value in settings.items():
        # Lazy %-style args avoid formatting when the level is disabled.
        logging.info('%s="%s"', name, value)
def setup_server():
    """
    Loads the routes and starts the server
    """
    # Routes are versioned by the major component of `options.version`,
    # e.g. version '1.2.3' yields the prefix '/v1/dinky'.
    version_url_prefix = 'v{}'.format(options.version.split('.')[0])
    application = tornado.web.Application([
        (r"/", RedirectHandler, {"url":
                                 r"/{}/dinky".format(version_url_prefix)}),
        (r"/{}/dinky".format(version_url_prefix), roothandler.RootHandler),
        (r"/{}/dinky/bgnd".format(version_url_prefix), bgndhandler.BgndHandler),
        (r"/{}/dinky/hello".format(version_url_prefix),
         hellohandler.HelloHandler)
    ])
    server = tornado.httpserver.HTTPServer(application)
    server.bind(options.port, options.ip)
    # Forks multiple sub-processes, one for each core
    server.start(int(options.processes))
    logging.info('start tornado http server at http://{0}:{1}'.format(
        options.ip, options.port))
    # Blocks here, serving requests until the process is stopped.
    tornado.ioloop.IOLoop.instance().start()
def main():
    """
    The entry point for the Dinky service.

    This will load the configuration files and start a Tornado webservice
    with one or more sub processes.

    NOTES:
        tornado.options.parse_command_line(final=True)
        Allows you to run the service with custom options.

        Examples:
            Change the logging level to debug:
                python dinky --logging=DEBUG
                python dinky --logging=debug

            Configure custom syslog server
                python dinky --syslog_host=54.77.151.169
    """
    # Order matters: config must be loaded before command-line parsing
    # (which finalizes logging), and logging before syslog/server setup.
    load_config()
    tornado.options.parse_command_line(final=True)
    log_config()
    configure_syslog()
    setup_server()
# Script entry point; guarded so importing this module has no side effects.
if __name__ == '__main__':  # pragma: no cover
    main()  # pragma: no cover
| {
"content_hash": "55c9eddb85f2c9fdccb523cc85f11c55",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 80,
"avg_line_length": 31.100719424460433,
"alnum_prop": 0.6509368494101319,
"repo_name": "kowalcj0/dinky",
"id": "6b97d0f6a820e4e6d377083881c4425acb04026f",
"size": "4386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dinky/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39699"
}
],
"symlink_target": ""
} |
# Sphinx configuration for exercising sphinx-issues against a self-hosted
# GitLab instance.
project = "sphinx-issues"
copyright = "2022, foobar"
author = "foobar"

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx_issues"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "alabaster"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# -- sphinx-issues configuration ----------------------------------------------
# Silence Sphinx warnings in the 'app.add_node' category.
suppress_warnings = ["app.add_node"]
# Route :issue:/:pr:/:commit:/:user: roles to a GitLab instance; the
# placeholders are filled from the role text and the default group/project.
issues_uri = "https://gitlab.company.com/{group}/{project}/-/issues/{issue}"
issues_prefix = "#"
issues_pr_uri = "https://gitlab.company.com/{group}/{project}/-/merge_requests/{pr}"
issues_pr_prefix = "!"
issues_commit_uri = "https://gitlab.company.com/{group}/{project}/-/commit/{commit}"
issues_commit_prefix = "@"
issues_user_uri = "https://gitlab.company.com/{user}"
issues_user_prefix = "@"
issues_default_group_project = "myteam/super_great_project"
| {
"content_hash": "09208f3d04c08dbb10ca849aae041255",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 84,
"avg_line_length": 36.111111111111114,
"alnum_prop": 0.6806153846153846,
"repo_name": "sloria/sphinx-issues",
"id": "34e86c7bd2e448e9fff888f31cfa68ea8e39d12e",
"size": "2328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28635"
}
],
"symlink_target": ""
} |
from google.cloud import dialogflowcx_v3
async def sample_update_entity_type():
    """Update a Dialogflow CX entity type and print the server's response."""
    # Create a client
    client = dialogflowcx_v3.EntityTypesAsyncClient()

    # Build the entity type payload to send with the update request.
    updated_entity = dialogflowcx_v3.EntityType()
    updated_entity.display_name = "display_name_value"
    updated_entity.kind = "KIND_REGEXP"

    request = dialogflowcx_v3.UpdateEntityTypeRequest(
        entity_type=updated_entity,
    )

    # Issue the RPC and report the result.
    response = await client.update_entity_type(request=request)
    print(response)

# [END dialogflow_v3_generated_EntityTypes_UpdateEntityType_async]
| {
"content_hash": "4147ac7e0af5a153d6726e7e672cd8fe",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 66,
"avg_line_length": 27.391304347826086,
"alnum_prop": 0.7206349206349206,
"repo_name": "googleapis/python-dialogflow-cx",
"id": "729381c0b61eeee0ae0f41735ffcee458164bdad",
"size": "2028",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v3_generated_entity_types_update_entity_type_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "10904903"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
} |
import unicodedata
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.text import ugettext_lazy as _
from django.utils.decorators import method_decorator
from wand.image import Image
from wand.color import Color
def remove_accents(string):
    """Return an ASCII bytestring approximation of *string*.

    NFKD normalization splits accented characters into base character plus
    combining mark; encoding with errors='ignore' then drops the marks.

    NOTE(review): relies on the Python 2 ``unicode`` builtin, so this helper
    is Python 2 only.
    """
    return unicodedata.normalize('NFKD', unicode(string)).encode('ascii','ignore')
def with_timestamp(cls):
    """Decorator to add added/modified field to particular model"""
    # Build both timestamp fields up front, then attach only the ones the
    # model does not already define.
    timestamp_fields = [
        ('added_at', models.DateTimeField(verbose_name=_('added at'),
                                          auto_now_add=True,
                                          editable=False)),
        ('modified_at', models.DateTimeField(verbose_name=_('modified at'),
                                             auto_now=True,
                                             editable=False)),
    ]
    for field_name, field in timestamp_fields:
        if not hasattr(cls, field_name):
            cls.add_to_class(field_name, field)
    return cls
def with_author(cls):
    """
    Decorator to add added_by/modified_by field to particular model
    """
    user_model = get_user_model()
    # The model's plural name seeds the reverse accessor names.
    related_base = cls._meta.verbose_name_plural.lower()
    author_fields = [
        (settings.AUTHOR_CREATED_BY_FIELD_NAME,
         models.ForeignKey(user_model,
                           related_name='%s_created' % related_base,
                           verbose_name='author',
                           null=True,
                           blank=True,
                           editable=False)),
        (settings.AUTHOR_UPDATED_BY_FIELD_NAME,
         models.ForeignKey(user_model,
                           related_name='%s_modified' % related_base,
                           verbose_name='last modified by',
                           null=True,
                           blank=True,
                           editable=False)),
    ]
    # Attach only the fields the model does not already define.
    for field_name, field in author_fields:
        if not hasattr(cls, field_name):
            cls.add_to_class(field_name, field)
    return cls
def admin_commentable(cls):
    """
    Adds a comments section to the change view,
    """
    def change_view(self, request, object_id, form_url='', extra_context=None):
        # Expose the edited object to the comment template.
        if not extra_context:
            extra_context = {}
        extra_context['model_object'] = self.model.objects.get(pk=object_id)
        return super(cls, self).change_view(
            request, object_id, form_url, extra_context=extra_context)

    cls.change_form_template = 'admin/change_form_commentable.html'
    cls.change_view = change_view
    return cls
def editonly_fieldsets(cls):
    """
    Hides edit-only fieldsets when adding a new object.
    """
    def get_fieldsets(self, request, obj=None):
        # On the "add" form (no obj) or when no editonly_fieldsets are
        # declared, fall back to the regular fieldsets.
        if not obj or not hasattr(cls, 'editonly_fieldsets'):
            return cls.fieldsets
        if cls.fieldsets:
            return cls.fieldsets + cls.editonly_fieldsets
        return cls.editonly_fieldsets

    cls.get_fieldsets = get_fieldsets
    return cls
def class_view_decorator(function_decorator):
    """Convert a function based decorator into a class based decorator usable
    on class based Views.

    Can't subclass the `View` as it breaks inheritance (super in particular),
    so we monkey-patch instead.
    """
    def apply_decorator(view_class):
        # Wrap dispatch() so every HTTP method passes through the decorator.
        view_class.dispatch = method_decorator(function_decorator)(
            view_class.dispatch)
        return view_class
    return apply_decorator
def generate_pdf_thumbnail(source, destination, width, height):
    """Render the first page of the PDF at MEDIA_ROOT/*source* into a
    width x height image saved at MEDIA_ROOT/*destination*.
    """
    # The '[0]' suffix tells wand/ImageMagick to load only the first page.
    first_page = settings.MEDIA_ROOT + source + '[0]'
    target = settings.MEDIA_ROOT + destination
    with Image(filename=first_page) as page:
        page.alpha_channel = False
        page.background_color = Color('white')
        page.resize(width, height)
        page.save(filename=target)
| {
"content_hash": "4c5f88a07017afa5833daf2b3b58a12c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 82,
"avg_line_length": 32.39837398373984,
"alnum_prop": 0.5969887076537014,
"repo_name": "matus-stehlik/glowing-batman",
"id": "e8dc8a6d160b7ad355f57a4891cfc980136135cf",
"size": "3985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "92"
},
{
"name": "JavaScript",
"bytes": "5817"
},
{
"name": "Python",
"bytes": "307329"
}
],
"symlink_target": ""
} |
"""irc2 low-level event handler"""
from . import utils
from .parser import Message
import logging
class IRCHandler(object):
    """
    IRCHandler handles incoming messages from an IRCClient. This is usually not
    something applications have to worry about.
    """

    def __init__(self, client):
        # Register a catch-all logger plus verb-specific handlers.
        handlers = [
            (Message(), self.handle_all),
            (Message(verb="PING"), self.handle_ping),
            (Message(verb="005"), self.handle_005),
            (Message(verb="PRIVMSG"), self.handle_privmsg),
        ]
        for matcher, callback in handlers:
            client.subscribe(matcher, callback)
        client.features = utils.IDict()
        self.client = client

    async def handle_all(self, message):
        # Log every incoming message, regardless of verb.
        logging.info("Recv: {}".format(message))

    async def handle_005(self, message):
        # RPL_ISUPPORT: each arg is either "KEY=value" or a bare flag.
        for feature in message.args[1:]:
            key, sep, value = feature.partition("=")
            if sep:
                self.client.features[key] = value
            else:
                self.client.features[key] = True
        logging.info("Received new features: {}".format(self.client.features))

    async def handle_privmsg(self, message):
        target, text = message.args
        await self.client.event.fire("message", message, message.prefix, target, text)

    async def handle_ping(self, message):
        # Echo back the server's token, defaulting when none was sent.
        if message.args:
            token = message.args[0]
        else:
            token = "PONG"
        await self.client.send("PONG", token)
if __name__ == '__main__':
    # Running this module directly executes its doctests.
    import doctest
    doctest.testmod()
| {
"content_hash": "fdb04b12d8bd2d90f304ad86ec311b07",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 86,
"avg_line_length": 34.45454545454545,
"alnum_prop": 0.6319261213720316,
"repo_name": "fwilson42/irc2",
"id": "146a9f503f70ce35588ed16e13bd29722d0ea119",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irc2/handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38818"
}
],
"symlink_target": ""
} |
"""Multi-dSprites dataset reader."""
import functools
import tensorflow.compat.v1 as tf
# Dataset files are gzip-compressed TFRecords.
COMPRESSION_TYPE = tf.io.TFRecordOptions.get_compression_type_string('GZIP')
# Height and width, in pixels, of every image in the dataset.
IMAGE_SIZE = [64, 64]
# The maximum number of foreground and background entities in each variant
# of the provided datasets. The values correspond to the number of
# segmentation masks returned per scene.
MAX_NUM_ENTITIES = {
    'binarized': 4,
    'colored_on_grayscale': 6,
    'colored_on_colored': 5
}
# Features stored as raw byte strings; `_decode` converts them to uint8.
BYTE_FEATURES = ['mask', 'image']
def feature_descriptions(max_num_entities, is_grayscale=False):
  """Create a dictionary describing the dataset features.

  Args:
    max_num_entities: int. The maximum number of foreground and background
      entities in each image. This corresponds to the number of segmentation
      masks and generative factors returned per scene.
    is_grayscale: bool. Whether images are grayscale. Otherwise they're assumed
      to be RGB.

  Returns:
    A dictionary which maps feature names to `tf.Example`-compatible shape and
    data type descriptors.
  """
  num_channels = 1 if is_grayscale else 3
  descriptions = {
      'image': tf.FixedLenFeature(IMAGE_SIZE + [num_channels], tf.string),
      'mask': tf.FixedLenFeature(IMAGE_SIZE + [max_num_entities, 1], tf.string),
      'color': tf.FixedLenFeature([max_num_entities, num_channels], tf.float32),
  }
  # Every remaining generative factor is one float per entity.
  for factor in ('x', 'y', 'shape', 'visibility', 'orientation', 'scale'):
    descriptions[factor] = tf.FixedLenFeature([max_num_entities], tf.float32)
  return descriptions
def _decode(example_proto, features):
  """Parse one serialized `tf.Example` into a dict of decoded tensors."""
  single_example = tf.parse_single_example(example_proto, features)
  # Byte features arrive as strings; decode to uint8 and drop the trailing
  # length-1 string dimension.
  for key in BYTE_FEATURES:
    decoded = tf.decode_raw(single_example[key], tf.uint8)
    single_example[key] = tf.squeeze(decoded, axis=-1)
  # To return masks in the canonical [entities, height, width, channels]
  # format, we need to transpose the tensor axes.
  single_example['mask'] = tf.transpose(single_example['mask'], [2, 0, 1, 3])
  return single_example
def dataset(tfrecords_path, dataset_variant, read_buffer_size=None,
            map_parallel_calls=None):
  """Read, decompress, and parse the TFRecords file.

  Args:
    tfrecords_path: str. Path to the dataset file.
    dataset_variant: str. One of ['binarized', 'colored_on_grayscale',
      'colored_on_colored']. This is used to identify the maximum number of
      entities in each scene. If an incorrect identifier is passed in, the
      TFRecords file will not be read correctly.
    read_buffer_size: int. Number of bytes in the read buffer. See documentation
      for `tf.data.TFRecordDataset.__init__`.
    map_parallel_calls: int. Number of elements decoded asynchronously in
      parallel. See documentation for `tf.data.Dataset.map`.

  Returns:
    An unbatched `tf.data.TFRecordDataset`.
  """
  if dataset_variant not in MAX_NUM_ENTITIES:
    raise ValueError('Invalid `dataset_variant` provided. The supported values'
                     ' are: {}'.format(list(MAX_NUM_ENTITIES.keys())))
  max_num_entities = MAX_NUM_ENTITIES[dataset_variant]
  # Only the binarized variant stores single-channel images.
  is_grayscale = dataset_variant == 'binarized'
  raw_dataset = tf.data.TFRecordDataset(
      tfrecords_path, compression_type=COMPRESSION_TYPE,
      buffer_size=read_buffer_size)
  features = feature_descriptions(max_num_entities, is_grayscale)

  def decode_example(example_proto):
    # Bind the feature description so `map` sees a one-argument function.
    return _decode(example_proto, features)

  return raw_dataset.map(decode_example,
                         num_parallel_calls=map_parallel_calls)
| {
"content_hash": "f8f9c4600810bb1f9d859f80e7542501",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 80,
"avg_line_length": 42.43333333333333,
"alnum_prop": 0.7025399319193506,
"repo_name": "deepmind/multi_object_datasets",
"id": "ef65f4bda220cda548ca43b28cea51fcbd04bd40",
"size": "4513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multi_dsprites.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24104"
}
],
"symlink_target": ""
} |
from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 1
_version_minor = 0
_version_micro = ''  # use '' for first of series, number for 1 and above
#_version_extra = 'dev'
_version_extra = ''  # Uncomment this for full releases

# Construct full version string from these.
# Micro/extra parts are appended only when non-empty, yielding "X.Y",
# "X.Y.Z", or "X.Y.Z.extra".
_ver = [_version_major, _version_minor]
if _version_micro:
    _ver.append(_version_micro)
if _version_extra:
    _ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))

# Trove classifiers describing the project on PyPI.
CLASSIFIERS = ['Development Status :: 3 - Alpha',
               'Environment :: Console',
               'Intended Audience :: Science/Research',
               'License :: OSI Approved :: MIT License',
               'Operating System :: OS Independent',
               'Programming Language :: Python',
               'Topic :: Scientific/Engineering']

# Description should be a one-liner:
description = 'anneal: a general-purpose simulated-annealing implementation'
# Long description will go up on the pypi page
long_description = '''
Anneal
========
``anneal`` is a simple python implementation of the simulated-annealing
optimization algorithm.
To get started, please check the repository README_.
.. _README: https://github.com/tcompa/anneal/blob/master/README.md
License
=======
``anneal`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2016 Tommaso Comparin
'''

# Remaining metadata consumed by setup().
NAME = 'anneal'
MAINTAINER = 'Tommaso Comparin'
MAINTAINER_EMAIL = 'tommaso.comparin@gmail.com'
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = 'http://github.com/tcompa/anneal/'
DOWNLOAD_URL = ''
LICENSE = 'MIT'
AUTHOR = 'Tommaso Comparin'
AUTHOR_EMAIL = 'tommaso.comparin@gmail.com'
PLATFORMS = 'OS Independent'
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGES = ['anneal',
            'anneal.tests']
PACKAGE_DATA = {'anneal': [pjoin('data', '*')]}
REQUIRES = []
| {
"content_hash": "2d0d4a9f950691580f3a161d5ae0dcb7",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 77,
"avg_line_length": 31.536231884057973,
"alnum_prop": 0.6907169117647058,
"repo_name": "tcompa/anneal",
"id": "ecdeeee62211849ca33e3aa52295d6ce755e9a7c",
"size": "2176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anneal/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7977"
}
],
"symlink_target": ""
} |
import atexit
import logging
import requests
import time
import google.auth
from functools import partial
from google.rpc import status_pb2
from google.auth.credentials import with_scopes_if_required
from google.auth.exceptions import RefreshError
from google.auth.transport.requests import Request as AuthRequest
from requests.packages.urllib3.util.retry import Retry
from threading import local
from .credentials_watcher import CredentialsWatcher
# Thread-local storage: each thread gets its own pooled requests Session.
_state = local()
# HTTP status codes that trigger a credentials refresh before retrying.
_refresh_status_codes = (401,)
# Upper bound on credential-refresh attempts per logical request.
_max_refresh_attempts = 5
_credentials_watcher = CredentialsWatcher()
# Stop the watcher when the interpreter exits.
atexit.register(_credentials_watcher.stop)
class RequestsProxy(object):
    """Wraps a ``requests`` library :class:`.Session` instance and
    exposes a compatible `request` method.
    """

    #: OAuth scope(s) requested for the credentials; subclasses override.
    SCOPE = None

    #: Determines how connection and read timeouts should be handled
    #: by this proxy.
    TIMEOUT_CONFIG = (3.05, 30)

    #: Determines how retries should be handled by this proxy.
    #: NOTE(review): ``method_whitelist``/``DEFAULT_METHOD_WHITELIST`` are
    #: the pre-urllib3-1.26 names (now ``allowed_methods``) -- confirm the
    #: pinned urllib3 version still provides them.
    RETRY_CONFIG = Retry(
        total=10, connect=10, read=5,
        method_whitelist=Retry.DEFAULT_METHOD_WHITELIST | frozenset(["POST"])
    )

    #: The number of connections to pool per Session.
    CONNECTION_POOL_SIZE = 32

    # A mapping from numeric Google RPC error codes to known error
    # code strings.
    _PB_ERROR_CODES = {
        2: "UNKNOWN",
        4: "DEADLINE_EXCEEDED",
        10: "ABORTED",
        13: "INTERNAL",
        14: "UNAVAILABLE",
    }

    # Fix for GCS client setting ALLOW_AUTO_SWITCH_TO_MTLS_URL to True
    # unless an api_endpoint client_option is set
    # https://github.com/googleapis/python-cloud-core/pull/75/files
    is_mtls = False

    def __init__(self, credentials=None, logger=None):
        """Resolve credentials (falling back to application-default ones),
        scope them, and register them with the refresh watcher.
        """
        if credentials is None:
            credentials = google.auth.default()[0]
        credentials = with_scopes_if_required(credentials, self.SCOPE)
        self.logger = logger or logging.getLogger(type(self).__name__)
        self.credentials = credentials
        _credentials_watcher.watch(credentials)

    def __del__(self):
        try:
            _credentials_watcher.unwatch(self.credentials)
        except TypeError:
            # This can happen when the daemon thread shuts down and
            # __del__() is implicitly ran. Crops up most commonly
            # in test suites as 'NoneType' object is not callable.
            pass

    def request(self, method, url, data=None, headers=None, retries=0, refresh_attempts=0, **kwargs):
        """Perform an authenticated HTTP request, refreshing credentials on
        401 responses and delegating other error responses to
        :meth:`._handle_response_error`.
        """
        session = self._get_session()
        headers = headers.copy() if headers is not None else {}
        auth_request = AuthRequest(session=session)
        # Prepared re-invocation used after an auth refresh: the refresh
        # counter advances while the ordinary retry counter starts over.
        retry_auth = partial(
            self.request,
            url=url, method=method,
            data=data, headers=headers,
            refresh_attempts=refresh_attempts + 1,
            retries=0,  # Retries intentionally get reset to 0.
            **kwargs
        )
        try:
            # Attach/refresh the Authorization header as needed.
            self.credentials.before_request(auth_request, method, url, headers)
        except RefreshError:
            if refresh_attempts < _max_refresh_attempts:
                return retry_auth()
            raise
        # Do not allow multiple timeout kwargs.
        kwargs["timeout"] = self.TIMEOUT_CONFIG
        response = session.request(method, url, data=data, headers=headers, **kwargs)
        if response.status_code in _refresh_status_codes and refresh_attempts < _max_refresh_attempts:
            self.logger.info(
                "Refreshing credentials due to a %s response. Attempt %s/%s.",
                response.status_code, refresh_attempts + 1, _max_refresh_attempts
            )
            try:
                self.credentials.refresh(auth_request)
            except RefreshError:
                # Best-effort: a persistent auth failure will surface when
                # the retried request below fails again.
                pass
            return retry_auth()
        elif response.status_code >= 400:
            response = self._handle_response_error(
                response, retries,
                url=url, method=method,
                data=data, headers=headers,
                **kwargs
            )
        return response

    def _get_session(self):
        """Return this thread's pooled Session, creating it on first use."""
        # Ensure we use one connection-pooling session per thread and
        # make use of requests' internal retry mechanism. It will
        # safely retry any requests that failed due to DNS lookup,
        # socket errors, etc.
        session = getattr(_state, "session", None)
        if session is None:
            session = _state.session = requests.Session()
            adapter = _state.adapter = requests.adapters.HTTPAdapter(
                max_retries=self.RETRY_CONFIG,
                pool_connections=self.CONNECTION_POOL_SIZE,
                pool_maxsize=self.CONNECTION_POOL_SIZE,
            )
            session.mount("http://", adapter)
            session.mount("https://", adapter)
        return session

    def _handle_response_error(self, response, retries, **kwargs):
        r"""Provides a way for each connection wrapper to handle error
        responses.

        Parameters:
          response(Response): An instance of :class:`.requests.Response`.
          retries(int): The number of times :meth:`.request` has been
            called so far.
          \**kwargs: The parameters with which :meth:`.request` was
            called. The `retries` parameter is excluded from `kwargs`
            intentionally.

        Returns:
          requests.Response
        """
        error = self._convert_response_to_error(response)
        if error is None:
            return response
        max_retries = self._max_retries_for_error(error)
        if max_retries is None or retries >= max_retries:
            return response
        # Exponential backoff capped at one second: 0.0625s, 0.125s, ...
        backoff = min(0.0625 * 2 ** retries, 1.0)
        self.logger.warning("Sleeping for %r before retrying failed request...", backoff)
        time.sleep(backoff)
        retries += 1
        self.logger.warning("Retrying failed request. Attempt %d/%d.", retries, max_retries)
        return self.request(retries=retries, **kwargs)

    def _convert_response_to_error(self, response):
        """Subclasses may override this method in order to influence
        how errors are parsed from the response.

        Parameters:
          response(Response): The response object.

        Returns:
          object or None: Any object for which a max retry count can
          be retrieved or None if the error cannot be handled.
        """
        content_type = response.headers.get("content-type", "")
        if "application/x-protobuf" in content_type:
            self.logger.debug("Decoding protobuf response.")
            data = status_pb2.Status.FromString(response.content)
            # Map the numeric RPC code onto a known status string.
            status = self._PB_ERROR_CODES.get(data.code)
            error = {"status": status}
            return error
        elif "application/json" in content_type:
            self.logger.debug("Decoding json response.")
            data = response.json()
            error = data.get("error")
            if not error or not isinstance(error, dict):
                self.logger.warning("Unexpected error response: %r", data)
                return None
            return error
        self.logger.warning("Unexpected response: %r", response.text)
        return None

    def _max_retries_for_error(self, error):
        """Subclasses may implement this method in order to influence
        how many times various error types should be retried.

        Parameters:
          error(dict): A dictionary containing a `status` key.

        Returns:
          int or None: The max number of times this error should be
          retried or None if it shouldn't.
        """
        return None
| {
"content_hash": "d9aa323705aee75e110f797fd6ac6998",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 102,
"avg_line_length": 35.39351851851852,
"alnum_prop": 0.6170045781556573,
"repo_name": "LeadPages/gcloud_requests",
"id": "bd4b5c93d00dbf13bd66781faf87fdea2649942c",
"size": "7645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcloud_requests/proxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33710"
}
],
"symlink_target": ""
} |
from ..lang.tools.baselex import BaseLexer, EOF, EPS
from ..lang.tools.grammar import Grammar
from ..lang.tools.lr import LrParserBuilder
from ..common import make_num, get_file
def get_layout(layout):
    """ Get a layout from object or file.

    Accepts either a :class:`Layout` instance (returned unchanged) or
    anything `get_file` accepts, in which case the layout is parsed from
    the file and the file is closed afterwards.
    """
    if isinstance(layout, Layout):
        return layout
    file = get_file(layout)
    try:
        return Layout.load(file)
    finally:
        # Close the file even when parsing raises, so the handle is
        # never leaked (the original only closed on success).
        file.close()
class Layout:
    """ Defines a layout for the linker to be used """

    def __init__(self):
        # Ordered list of Memory regions plus an optional entry symbol.
        self.memories = []
        self.entry = None

    def add_memory(self, memory):
        """Append a memory region to this layout."""
        self.memories.append(memory)

    def __eq__(self, other):
        # Two layouts are equal when their memory lists match.
        return self.memories == other.memories

    def __repr__(self):
        return repr(self.memories)

    @staticmethod
    def load(file):
        """ Load a layout from file """
        return _lloader.load_layout(file)
class EntrySymbol:
    """Specify the entry symbol of this file."""

    def __init__(self, symbol_name):
        # Name of the symbol where execution should start.
        self.symbol_name = symbol_name
class Memory:
    """ Specification of how a memory may look like and what it contains. """

    def __init__(self, name):
        self.inputs = []
        self.name = name
        self.location = 0x0
        self.size = 0x0

    def add_input(self, inp):
        """Append an Input (section, align directive, ...) to this region."""
        assert isinstance(inp, Input)
        self.inputs.append(inp)

    def __repr__(self):
        header = "MEM {} loc={:08X} size={:08X}".format(
            self.name, self.location, self.size
        )
        return header + str(self.inputs)

    def __eq__(self, other):
        # Compare via repr: covers name, location, size and inputs at once.
        return str(self) == str(other)
class Input:
    # Marker base class for everything that can appear inside a Memory
    # (sections, section data, alignment directives, symbol definitions).
    pass
class Section(Input):
    """ Insert a section here """
    def __init__(self, section_name):
        # Name of the object-file section placed at this point.
        self.section_name = section_name

    def __repr__(self):
        return "Section({})".format(self.section_name)
class SectionData(Input):
    """ Insert only the data of a section here, not the section itself """
    def __init__(self, section_name):
        self.section_name = section_name

    def __repr__(self):
        return "SectionData({})".format(self.section_name)
class Align(Input):
    """ Align the current position to the given byte """
    def __init__(self, alignment):
        # Alignment boundary in bytes.
        self.alignment = alignment

    def __repr__(self):
        return "Align({})".format(self.alignment)
class SymbolDefinition(Input):
    """Input that defines a symbol (presumably at the current layout
    position -- confirm against the linker's handling of this node)."""
    def __init__(self, symbol_name):
        self.symbol_name = symbol_name

    def __repr__(self):
        return "Symbol define: {}".format(self.symbol_name)
class LayoutLexer(BaseLexer):
    """ Lexer for layout files """

    # Keywords of the layout language; identifiers matching one of these
    # are re-tagged with the keyword itself as token type (see handle_id).
    kws = [
        "MEMORY",
        "ALIGN",
        "ENTRY",
        "LOCATION",
        "SECTION",
        "SECTIONDATA",
        "SIZE",
        "DEFINESYMBOL",
    ]

    def __init__(self):
        # Token specification: (type, regex, handler). Order matters --
        # HEXNUMBER must be tried before NUMBER so "0x.." is not split.
        tok_spec = [
            ("HEXNUMBER", r"0x[\da-fA-F]+", self.handle_number),
            ("NUMBER", r"\d+", self.handle_number),
            ("ID", r"[_A-Za-z][_A-Za-z\d_]*", self.handle_id),
            ("SKIP", r"[ \t\r\n]", None),
            (
                "LEESTEKEN",
                r":=|[\.,=:\-+*\[\]/\(\)]|>=|<=|<>|>|<|}|{",
                lambda typ, val: (val, val),
            ),
            ("STRING", r"'.*?'", lambda typ, val: (typ, val[1:-1])),
        ]
        super().__init__(tok_spec)

    def handle_id(self, typ, val):
        # Promote identifiers that are actually keywords.
        if val in self.kws:
            typ = val
        return typ, val

    def handle_number(self, typ, val):
        # Normalize hex and decimal literals to a single NUMBER token type.
        val = make_num(val)
        typ = "NUMBER"
        return typ, val
class LayoutParser:
    """LR parser for the layout language, built from the grammar below."""

    def __init__(self, kws):
        # Terminal symbols: punctuation and literals plus the lexer keywords.
        toks = [
            "ID",
            "NUMBER",
            "{",
            "}",
            ".",
            ":",
            "=",
            "(",
            ")",
            EPS,
            EOF,
        ] + kws
        grammar = Grammar()
        grammar.add_terminals(toks)
        # A layout is a list of top-level elements: memories and the entry.
        grammar.add_production("layout", ["top_level_list"])
        grammar.add_one_or_more("top_level", "top_level_list")
        grammar.add_production("top_level", ["mem"])
        grammar.add_production("top_level", ["entry"])
        # ENTRY(symbol)
        grammar.add_production(
            "entry", ["ENTRY", "(", "ID", ")"], self.handle_entry
        )
        # MEMORY name LOCATION=number SIZE=number { inputs }
        grammar.add_production(
            "mem",
            [
                "MEMORY",
                "ID",
                "LOCATION",
                "=",
                "NUMBER",
                "SIZE",
                "=",
                "NUMBER",
                "{",
                "input_list",
                "}",
            ],
            self.handle_mem,
        )
        grammar.add_one_or_more("input", "input_list")
        grammar.add_production(
            "input", ["ALIGN", "(", "NUMBER", ")"], self.handle_align
        )
        grammar.add_production(
            "input", ["SECTION", "(", "ID", ")"], self.handle_section
        )
        grammar.add_production(
            "input", ["DEFINESYMBOL", "(", "ID", ")"], self.handle_defsym
        )
        grammar.add_production(
            "input", ["SECTIONDATA", "(", "ID", ")"], self.handle_section_data
        )
        grammar.start_symbol = "layout"
        self.p = LrParserBuilder(grammar).generate_parser()

    def parse(self, lexer, layout):
        """Parse tokens from *lexer*, filling *layout* in place."""
        self.layout = layout
        self.p.parse(lexer)

    # The handle_* callbacks below receive one argument per grammar symbol
    # of the matched production.
    def handle_mem(
        self,
        mem_tag,
        mem_name,
        loc_tag,
        eq1,
        loc,
        size_tag,
        eq2,
        size,
        lbrace,
        inps,
        rbrace,
    ):
        m = Memory(mem_name.val)
        m.size = size.val
        m.location = loc.val
        for inp in inps:
            m.add_input(inp)
        self.layout.add_memory(m)

    def handle_entry(self, entry_tag, lbrace, name, rbrace):
        self.layout.entry = EntrySymbol(name.val)

    def handle_align(self, align_tag, lbrace, alignment, rbrace):
        return Align(alignment.val)

    def handle_section(self, section_tag, lbrace, section_name, rbrace):
        return Section(section_name.val)

    def handle_section_data(self, section_tag, lbrace, section_name, rbrace):
        return SectionData(section_name.val)

    def handle_defsym(self, section_tag, lbrace, name, rbrace):
        return SymbolDefinition(name.val)
class LayoutLoader:
    """Facade tying the layout lexer and parser together."""

    def __init__(self):
        self.lexer = LayoutLexer()
        self.parser = LayoutParser(self.lexer.kws)

    def load_layout(self, f):
        """Parse a Layout from the open file *f*."""
        # TODO: perhaps the read is better in the lexer?
        result = Layout()
        self.lexer.feed(f.read())
        self.parser.parse(self.lexer, result)
        return result
# Single definition:
# Shared module-level loader instance used by Layout.load().
_lloader = LayoutLoader()
| {
"content_hash": "62e2f75da4a955636db163b80d819942",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 78,
"avg_line_length": 25.158490566037734,
"alnum_prop": 0.5137243137843108,
"repo_name": "windelbouwman/ppci-mirror",
"id": "756c3e05f0e2339b2e47f0aabfefe12299c669c7",
"size": "6667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ppci/binutils/layout.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "94"
},
{
"name": "Brainfuck",
"bytes": "5867"
},
{
"name": "C",
"bytes": "229265"
},
{
"name": "C++",
"bytes": "1257"
},
{
"name": "Coq",
"bytes": "98028"
},
{
"name": "HTML",
"bytes": "363"
},
{
"name": "JavaScript",
"bytes": "2165"
},
{
"name": "LLVM",
"bytes": "11206"
},
{
"name": "Python",
"bytes": "2991165"
},
{
"name": "Shell",
"bytes": "960"
},
{
"name": "Verilog",
"bytes": "9363"
}
],
"symlink_target": ""
} |
"""The volumes extension."""
from oslo_utils import strutils
from webob import exc
from nova.api.openstack import api_version_request
from nova.api.openstack.api_version_request \
import MAX_PROXY_API_SUPPORT_VERSION
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import volumes as volumes_schema
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova import objects
from nova.policies import volumes as vol_policies
from nova.policies import volumes_attachments as va_policies
from nova.volume import cinder
def _translate_volume_detail_view(context, vol):
    """Maps keys for volumes details view."""
    # The detail view currently exposes exactly the summary fields; no
    # additional data / lookups at the moment.
    return _translate_volume_summary_view(context, vol)
def _translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availabilityZone'] = vol['availability_zone']
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
# NOTE(ildikov): The attachments field in the volume info that
# Cinder sends is converted to an OrderedDict with the
# instance_uuid as key to make it easier for the multiattach
# feature to check the required information. Multiattach will
# be enable in the Nova API in Newton.
# The format looks like the following:
# attachments = {'instance_uuid': {
# 'attachment_id': 'attachment_uuid',
# 'mountpoint': '/dev/sda/
# }
# }
attachment = list(vol['attachments'].items())[0]
d['attachments'] = [_translate_attachment_summary_view(vol['id'],
attachment[0],
attachment[1].get('mountpoint'))]
else:
d['attachments'] = [{}]
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volumeType'] = vol['volume_type']['name']
else:
d['volumeType'] = vol['volume_type_id']
d['snapshotId'] = vol['snapshot_id']
if vol.get('volume_metadata'):
d['metadata'] = vol.get('volume_metadata')
else:
d['metadata'] = {}
return d
class VolumeController(wsgi.Controller):
    """The Volumes API controller for the OpenStack API."""

    def __init__(self):
        super(VolumeController, self).__init__()
        # All volume operations are proxied straight through to Cinder.
        self.volume_api = cinder.API()

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @wsgi.expected_errors(404)
    def show(self, req, id):
        """Return data about the given volume."""
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'show',
                    target={'project_id': context.project_id})
        try:
            vol = self.volume_api.get(context, id)
        except exception.VolumeNotFound as e:
            # Map Cinder's not-found onto a 404 for API callers.
            raise exc.HTTPNotFound(explanation=e.format_message())
        return {'volume': _translate_volume_detail_view(context, vol)}

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @wsgi.response(202)
    @wsgi.expected_errors((400, 404))
    def delete(self, req, id):
        """Delete a volume."""
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'delete',
                    target={'project_id': context.project_id})
        try:
            self.volume_api.delete(context, id)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @validation.query_schema(volumes_schema.index_query)
    @wsgi.expected_errors(())
    def index(self, req):
        """Returns a summary list of volumes."""
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'list',
                    target={'project_id': context.project_id})
        return self._items(req, entity_maker=_translate_volume_summary_view)

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @validation.query_schema(volumes_schema.detail_query)
    @wsgi.expected_errors(())
    def detail(self, req):
        """Returns a detailed list of volumes."""
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'detail',
                    target={'project_id': context.project_id})
        return self._items(req, entity_maker=_translate_volume_detail_view)

    def _items(self, req, entity_maker):
        """Returns a list of volumes, transformed through entity_maker."""
        context = req.environ['nova.context']
        volumes = self.volume_api.get_all(context)
        # Honor the standard pagination query parameters.
        limited_list = common.limited(volumes, req)
        res = [entity_maker(context, vol) for vol in limited_list]
        return {'volumes': res}

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @wsgi.expected_errors((400, 403, 404))
    @validation.schema(volumes_schema.create)
    def create(self, req, body):
        """Creates a new volume."""
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'create',
                    target={'project_id': context.project_id})
        vol = body['volume']
        vol_type = vol.get('volume_type')
        metadata = vol.get('metadata')
        snapshot_id = vol.get('snapshot_id', None)
        if snapshot_id is not None:
            try:
                snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            except exception.SnapshotNotFound as e:
                raise exc.HTTPNotFound(explanation=e.format_message())
        else:
            snapshot = None
        size = vol.get('size', None)
        # When cloning from a snapshot, default the size to the snapshot's.
        if size is None and snapshot is not None:
            size = snapshot['volume_size']
        availability_zone = vol.get('availability_zone')
        try:
            new_volume = self.volume_api.create(
                context,
                size,
                vol.get('display_name'),
                vol.get('display_description'),
                snapshot=snapshot,
                volume_type=vol_type,
                metadata=metadata,
                availability_zone=availability_zone
            )
        except exception.InvalidInput as err:
            raise exc.HTTPBadRequest(explanation=err.format_message())
        except exception.OverQuota as err:
            raise exc.HTTPForbidden(explanation=err.format_message())
        # TODO(vish): Instance should be None at db layer instead of
        #             trying to lazy load, but for now we turn it into
        #             a dict to avoid an error.
        retval = _translate_volume_detail_view(context, dict(new_volume))
        result = {'volume': retval}
        location = '%s/%s' % (req.url, new_volume['id'])
        return wsgi.ResponseObject(result, headers=dict(location=location))
def _translate_attachment_detail_view(bdm, show_tag=False,
                                      show_delete_on_termination=False):
    """Build the detail view dict for one attached volume.

    :param bdm: BlockDeviceMapping object for an attached volume
    :param show_tag: include the "tag" field in the response when True
    :param show_delete_on_termination: include the "delete_on_termination"
        field in the response when True
    """
    detail = _translate_attachment_summary_view(
        bdm.volume_id, bdm.instance_uuid, bdm.device_name)
    # Microversion-gated fields are only added when the caller asks for them.
    optional_fields = (('tag', show_tag),
                       ('delete_on_termination', show_delete_on_termination))
    for field, wanted in optional_fields:
        if wanted:
            detail[field] = getattr(bdm, field)
    return detail
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
def _check_request_version(req, min_version, method, server_id, server_state):
    """Raise HTTPConflict if the request microversion is below min_version.

    Used to reject operations (e.g. on shelved servers) that are only
    supported starting at a given microversion.
    """
    if api_version_request.is_supported(req, min_version=min_version):
        return
    exc_inv = exception.InstanceInvalidState(
        attr='vm_state',
        instance_uuid=server_id,
        state=server_state,
        method=method)
    common.raise_http_conflict_for_instance_invalid_state(
        exc_inv, method, server_id)
class VolumeAttachmentController(wsgi.Controller):
"""The volume attachment API controller for the OpenStack API.
A child resource of the server. Note that we use the volume id
as the ID of the attachment (though this is not guaranteed externally)
"""
    def __init__(self):
        # Compute API for attach/detach/swap; Cinder API for volume lookups.
        self.compute_api = compute.API()
        self.volume_api = cinder.API()
        super(VolumeAttachmentController, self).__init__()
    @wsgi.expected_errors(404)
    @validation.query_schema(volumes_schema.index_query_275, '2.75')
    @validation.query_schema(volumes_schema.index_query, '2.0', '2.74')
    def index(self, req, server_id):
        """Returns the list of volume attachments for a given instance."""
        context = req.environ['nova.context']
        instance = common.get_instance(self.compute_api, context, server_id)
        context.can(va_policies.POLICY_ROOT % 'index',
                    target={'project_id': instance.project_id})
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        limited_list = common.limited(bdms, req)
        results = []
        # Microversion 2.70 adds 'tag' and 2.79 adds 'delete_on_termination'
        # to each attachment in the response.
        show_tag = api_version_request.is_supported(req, '2.70')
        show_delete_on_termination = api_version_request.is_supported(
            req, '2.79')
        for bdm in limited_list:
            # Only BDMs backed by a volume are attachments; skip the rest.
            if bdm.volume_id:
                va = _translate_attachment_detail_view(
                    bdm, show_tag=show_tag,
                    show_delete_on_termination=show_delete_on_termination)
                results.append(va)
        return {'volumeAttachments': results}
    @wsgi.expected_errors(404)
    def show(self, req, server_id, id):
        """Return data about the given volume attachment.

        ``id`` is the id of the attached volume (attachments are keyed by
        volume id); 404 if the volume is not attached to the server.
        """
        context = req.environ['nova.context']
        instance = common.get_instance(self.compute_api, context, server_id)
        context.can(va_policies.POLICY_ROOT % 'show',
                    target={'project_id': instance.project_id})
        volume_id = id
        try:
            bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                context, volume_id, instance.uuid)
        except exception.VolumeBDMNotFound:
            msg = (_("Instance %(instance)s is not attached "
                     "to volume %(volume)s") %
                   {'instance': server_id, 'volume': volume_id})
            raise exc.HTTPNotFound(explanation=msg)
        # Microversion 2.70 adds 'tag', 2.79 adds 'delete_on_termination'.
        show_tag = api_version_request.is_supported(req, '2.70')
        show_delete_on_termination = api_version_request.is_supported(
            req, '2.79')
        return {'volumeAttachment': _translate_attachment_detail_view(
            bdm, show_tag=show_tag,
            show_delete_on_termination=show_delete_on_termination)}
    # TODO(mriedem): This API should return a 202 instead of a 200 response.
    @wsgi.expected_errors((400, 403, 404, 409))
    @validation.schema(volumes_schema.create_volume_attachment, '2.0', '2.48')
    @validation.schema(volumes_schema.create_volume_attachment_v249, '2.49',
                       '2.78')
    @validation.schema(volumes_schema.create_volume_attachment_v279, '2.79')
    def create(self, req, server_id, body):
        """Attach a volume to an instance.

        The attach itself is asynchronous; the response describes the
        requested attachment, not its final state.
        """
        context = req.environ['nova.context']
        instance = common.get_instance(self.compute_api, context, server_id)
        context.can(va_policies.POLICY_ROOT % 'create',
                    target={'project_id': instance.project_id})
        volume_id = body['volumeAttachment']['volumeId']
        device = body['volumeAttachment'].get('device')
        tag = body['volumeAttachment'].get('tag')
        delete_on_termination = body['volumeAttachment'].get(
            'delete_on_termination', False)
        # Attaching to a shelved server is only allowed from microversion
        # 2.20 onwards; older requests get a 409.
        if instance.vm_state in (vm_states.SHELVED,
                                 vm_states.SHELVED_OFFLOADED):
            _check_request_version(req, '2.20', 'attach_volume',
                                   server_id, instance.vm_state)
        try:
            supports_multiattach = common.supports_multiattach_volume(req)
            device = self.compute_api.attach_volume(
                context, instance, volume_id, device, tag=tag,
                supports_multiattach=supports_multiattach,
                delete_on_termination=delete_on_termination)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except (exception.InstanceIsLocked,
                exception.DevicePathInUse,
                exception.MultiattachNotSupportedByVirtDriver) as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'attach_volume', server_id)
        except (exception.InvalidVolume,
                exception.InvalidDevicePath,
                exception.InvalidInput,
                exception.VolumeTaggedAttachNotSupported,
                exception.MultiattachNotSupportedOldMicroversion,
                exception.MultiattachToShelvedNotSupported) as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.TooManyDiskDevices as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        # The attach is async
        # NOTE(mriedem): It would be nice to use
        # _translate_attachment_summary_view here but that does not include
        # the 'device' key if device is None or the empty string which would
        # be a backward incompatible change.
        attachment = {}
        attachment['id'] = volume_id
        attachment['serverId'] = server_id
        attachment['volumeId'] = volume_id
        attachment['device'] = device
        if api_version_request.is_supported(req, '2.70'):
            attachment['tag'] = tag
        if api_version_request.is_supported(req, '2.79'):
            attachment['delete_on_termination'] = delete_on_termination
        return {'volumeAttachment': attachment}
    def _update_volume_swap(self, req, instance, id, body):
        """Swap the attached volume ``id`` (from the URL path) for the
        volume named by ``volumeId`` in the request body.
        """
        context = req.environ['nova.context']
        old_volume_id = id
        try:
            old_volume = self.volume_api.get(context, old_volume_id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        new_volume_id = body['volumeAttachment']['volumeId']
        try:
            new_volume = self.volume_api.get(context, new_volume_id)
        except exception.VolumeNotFound as e:
            # NOTE: This BadRequest is different from the above NotFound even
            # though the same VolumeNotFound exception. This is intentional
            # because new_volume_id is specified in a request body and if a
            # nonexistent resource in the body (not URI) the code should be
            # 400 Bad Request as API-WG guideline. On the other hand,
            # old_volume_id is specified with URI. So it is valid to return
            # NotFound response if that is not existent.
            raise exc.HTTPBadRequest(explanation=e.format_message())
        try:
            self.compute_api.swap_volume(context, instance, old_volume,
                                         new_volume)
        except exception.VolumeBDMNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except (exception.InvalidVolume,
                exception.MultiattachSwapVolumeNotSupported) as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'swap_volume', instance.uuid)
    def _update_volume_regular(self, req, instance, id, body):
        """Update mutable attachment fields (currently only
        ``delete_on_termination``); reject changes to immutable ones.
        """
        context = req.environ['nova.context']
        att = body['volumeAttachment']
        # NOTE(danms): We may be doing an update of regular parameters in
        # the midst of a swap operation, so to find the original BDM, we need
        # to use the old volume ID, which is the one in the path.
        volume_id = id
        try:
            bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                context, volume_id, instance.uuid)
            # NOTE(danms): The attachment id is just the (current) volume id
            if 'id' in att and att['id'] != volume_id:
                raise exc.HTTPBadRequest(explanation='The id property is '
                                                     'not mutable')
            if 'serverId' in att and att['serverId'] != instance.uuid:
                raise exc.HTTPBadRequest(explanation='The serverId property '
                                                     'is not mutable')
            if 'device' in att and att['device'] != bdm.device_name:
                raise exc.HTTPBadRequest(explanation='The device property is '
                                                     'not mutable')
            if 'tag' in att and att['tag'] != bdm.tag:
                raise exc.HTTPBadRequest(explanation='The tag property is '
                                                     'not mutable')
            if 'delete_on_termination' in att:
                bdm.delete_on_termination = strutils.bool_from_string(
                    att['delete_on_termination'], strict=True)
            bdm.save()
        except exception.VolumeBDMNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
    @wsgi.response(202)
    @wsgi.expected_errors((400, 404, 409))
    @validation.schema(volumes_schema.update_volume_attachment, '2.0', '2.84')
    @validation.schema(volumes_schema.update_volume_attachment_v285,
                       min_version='2.85')
    def update(self, req, server_id, id, body):
        """Update a volume attachment.

        ``id`` is the currently attached volume; ``volumeId`` in the body is
        the desired volume after the call.  Before microversion 2.85 this is
        a pure swap; from 2.85 other attachment fields may also be updated.
        """
        context = req.environ['nova.context']
        instance = common.get_instance(self.compute_api, context, server_id)
        attachment = body['volumeAttachment']
        volume_id = attachment['volumeId']
        only_swap = not api_version_request.is_supported(req, '2.85')
        # NOTE(brinzhang): If the 'volumeId' requested by the user is
        # different from the 'id' in the url path, or only swap is allowed by
        # the microversion, we should check the swap volume policy.
        # otherwise, check the volume update policy.
        if only_swap or id != volume_id:
            context.can(va_policies.POLICY_ROOT % 'swap', target={})
        else:
            context.can(va_policies.POLICY_ROOT % 'update',
                        target={'project_id': instance.project_id})
        if only_swap:
            # NOTE(danms): Original behavior is always call swap on PUT
            self._update_volume_swap(req, instance, id, body)
        else:
            # NOTE(danms): New behavior is update any supported attachment
            # properties first, and then call swap if volumeId differs
            self._update_volume_regular(req, instance, id, body)
            if id != volume_id:
                self._update_volume_swap(req, instance, id, body)
    @wsgi.response(202)
    @wsgi.expected_errors((400, 403, 404, 409))
    def delete(self, req, server_id, id):
        """Detach a volume from an instance.

        ``id`` is the id of the attached volume.  404 if either the volume
        or the attachment does not exist; 400 for a root device volume.
        """
        context = req.environ['nova.context']
        instance = common.get_instance(self.compute_api, context, server_id,
                                       expected_attrs=['device_metadata'])
        context.can(va_policies.POLICY_ROOT % 'delete',
                    target={'project_id': instance.project_id})
        volume_id = id
        # Detaching from a shelved server requires microversion >= 2.20.
        if instance.vm_state in (vm_states.SHELVED,
                                 vm_states.SHELVED_OFFLOADED):
            _check_request_version(req, '2.20', 'detach_volume',
                                   server_id, instance.vm_state)
        try:
            volume = self.volume_api.get(context, volume_id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        try:
            bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                context, volume_id, instance.uuid)
        except exception.VolumeBDMNotFound:
            msg = (_("Instance %(instance)s is not attached "
                     "to volume %(volume)s") %
                   {'instance': server_id, 'volume': volume_id})
            raise exc.HTTPNotFound(explanation=msg)
        # Refuse to detach the root device volume.
        if bdm.is_root:
            msg = _("Cannot detach a root device volume")
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            self.compute_api.detach_volume(context, instance, volume)
        except exception.InvalidVolume as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'detach_volume', server_id)
def _translate_snapshot_detail_view(context, vol):
    """Build the detail view dict for a snapshot.

    # NOTE(gagupta): detail currently adds nothing on top of the summary.
    """
    return _translate_snapshot_summary_view(context, vol)
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
class SnapshotController(wsgi.Controller):
"""The Snapshots API controller for the OpenStack API."""
    def __init__(self):
        # Snapshots are managed entirely by the Cinder volume service.
        self.volume_api = cinder.API()
        super(SnapshotController, self).__init__()
    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @wsgi.expected_errors(404)
    def show(self, req, id):
        """Return data about the given snapshot.

        404 if the snapshot does not exist.
        """
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'snapshots:show',
                    target={'project_id': context.project_id})
        try:
            vol = self.volume_api.get_snapshot(context, id)
        except exception.SnapshotNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return {'snapshot': _translate_snapshot_detail_view(context, vol)}
    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @wsgi.response(202)
    @wsgi.expected_errors(404)
    def delete(self, req, id):
        """Delete a snapshot.

        Returns 202 on success, 404 if the snapshot does not exist.
        """
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'snapshots:delete',
                    target={'project_id': context.project_id})
        try:
            self.volume_api.delete_snapshot(context, id)
        except exception.SnapshotNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @validation.query_schema(volumes_schema.index_query)
    @wsgi.expected_errors(())
    def index(self, req):
        """Returns a summary list of snapshots."""
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'snapshots:list',
                    target={'project_id': context.project_id})
        # Same listing path as detail(); only the view-maker differs.
        return self._items(req, entity_maker=_translate_snapshot_summary_view)
    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @validation.query_schema(volumes_schema.detail_query)
    @wsgi.expected_errors(())
    def detail(self, req):
        """Returns a detailed list of snapshots."""
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'snapshots:detail',
                    target={'project_id': context.project_id})
        # Same listing path as index(); only the view-maker differs.
        return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['nova.context']
snapshots = self.volume_api.get_all_snapshots(context)
limited_list = common.limited(snapshots, req)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @wsgi.expected_errors((400, 403))
    @validation.schema(volumes_schema.snapshot_create)
    def create(self, req, body):
        """Creates a new snapshot.

        403 when over quota; 400 on invalid input (schema-validated).
        """
        context = req.environ['nova.context']
        context.can(vol_policies.POLICY_NAME % 'snapshots:create',
                    target={'project_id': context.project_id})
        snapshot = body['snapshot']
        volume_id = snapshot['volume_id']
        # 'force' selects the force-snapshot code path in Cinder
        # (presumably to snapshot an in-use volume — confirm with Cinder docs).
        force = snapshot.get('force', False)
        force = strutils.bool_from_string(force, strict=True)
        if force:
            create_func = self.volume_api.create_snapshot_force
        else:
            create_func = self.volume_api.create_snapshot
        try:
            new_snapshot = create_func(context, volume_id,
                                       snapshot.get('display_name'),
                                       snapshot.get('display_description'))
        except exception.OverQuota as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        retval = _translate_snapshot_detail_view(context, new_snapshot)
        return {'snapshot': retval}
| {
"content_hash": "d87919c9f70c4dfebf9eaf9de37efb28",
"timestamp": "",
"source": "github",
"line_count": 649,
"max_line_length": 78,
"avg_line_length": 41.72265023112481,
"alnum_prop": 0.6126744959007312,
"repo_name": "klmitch/nova",
"id": "15b752f59648d0a8e8a51cdbaaa5f286a1aa4d1d",
"size": "27714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/volumes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "851"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "44222"
},
{
"name": "Python",
"bytes": "22328409"
},
{
"name": "Shell",
"bytes": "29138"
},
{
"name": "Smarty",
"bytes": "405441"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin

import json
import logging
from datetime import datetime
from functools import partial, wraps

from requests.auth import AuthBase
from sqlalchemy import (Table, Column, Integer, String, ForeignKey, DateTime, Boolean)
from sqlalchemy.orm import relation, backref
from sqlalchemy.orm.exc import NoResultFound

from flexget import db_schema
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import PluginError
from flexget.utils import qualities
from flexget.utils.database import with_session
from flexget.utils.requests import Session
# Module-level logger for the t411 API service layer.
log = logging.getLogger('t411_api')
# region ORM definitions
SCHEMA_VER = 0
Base = db_schema.versioned_base('t411', SCHEMA_VER)
# Many-to-many association: which term types apply to which categories.
category_term_types = Table('category_term_types', Base.metadata,
                            Column('category_id', Integer, ForeignKey('categories.id')),
                            Column('term_type_id', Integer, ForeignKey('term_types.id')))
Base.register_table(category_term_types)
# Many-to-many association: which terms are set on which torrents.
torrent_terms = Table('torrent_terms', Base.metadata,
                      Column('torrent_id', Integer, ForeignKey('torrent.id')),
                      Column('term_id', Integer, ForeignKey('term.id')))
Base.register_table(torrent_terms)
@db_schema.upgrade('t411')
def upgrade(ver, session):
    """No migrations yet: report the current schema version unchanged."""
    return SCHEMA_VER
class Category(Base):
    """T411 category; categories form a tree via parent_id (self-reference)."""
    __tablename__ = 'categories'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    # NULL for a main (top-level) category.
    parent_id = Column(Integer, ForeignKey('categories.id'))
    sub_categories = relation('Category',
                              backref=backref('parent', remote_side=[id]),
                              cascade='all, delete, delete-orphan')
    # Term types applicable to this category (m2m via category_term_types).
    term_types = relation('TermType',
                          secondary=category_term_types,
                          backref='categories')
    torrents = relation('Torrent',
                        backref='category',
                        cascade='all, delete, delete-orphan')
class TermType(Base):
    """A kind of search criterion on T411 (e.g. video quality)."""
    __tablename__ = 'term_types'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    # Selection mode as reported by the API — TODO confirm allowed values.
    mode = Column(String)
    terms = relation('Term',
                     backref='type',
                     cascade='all, delete, delete-orphan')
class Term(Base):
    """A concrete value of a TermType (e.g. 'Saison 02')."""
    __tablename__ = 'term'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    type_id = Column(Integer, ForeignKey('term_types.id'))
class Torrent(Base):
    """
    Immutable torrent information.
    """
    __tablename__ = 'torrent'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    rewrite_name = Column(String)
    category_id = Column(Integer, ForeignKey('categories.id'))
    # Terms set on this torrent (m2m via torrent_terms).
    terms = relation('Term',
                     secondary='torrent_terms',
                     backref='torrents')
    # Uploader's numeric id and display name.
    owner = Column(Integer)
    username = Column(String)
class TorrentStatus(Base):
    """A dated status record for a torrent."""
    __tablename__ = 'torrent_status'
    id = Column(Integer, primary_key=True)
    torrent_id = Column(Integer, ForeignKey('torrent.id'))
    timestamp = Column(DateTime)
class Credential(Base):
    """Stored T411 account credentials and the last obtained API token."""
    __tablename__ = 'credential'
    username = Column(String, primary_key=True)
    password = Column(String, nullable=False)
    api_token = Column(String)
    # Marks the account used when no username is given explicitly.
    default = Column(Boolean, nullable=False, default=False)
# endregion ORM definition
class FriendlySearchQuery(object):
    """User-level search criteria: free-text expression, category name and
    human-readable term names, plus a result cap.
    """

    def __init__(self):
        self.expression = None
        self.category_name = None
        self.term_names = []
        self.max_results = 10

    def add_season_term(self, season):
        """Append the T411 'Saison NN' term for the given season number."""
        self.term_names.append("Saison %02d" % season)

    def add_episode_term(self, episode):
        """Append the T411 'Episode NN' term for the given episode number."""
        self.term_names.append("Episode %02d" % episode)
# T411 REST endpoint paths (appended to the API domain).
T411API_DOMAIN_URL = "api.t411.ch"
T411API_CATEGORY_TREE_PATH = "/categories/tree/"
T411API_AUTH_PATH = "/auth"
T411API_TERMS_PATH = "/terms/tree/"
T411API_SEARCH_PATH = "/torrents/search/"
T411API_DOWNLOAD_PATH = "/torrents/download/"
T411API_DETAILS_PATH = "/torrents/details/"
# Term type id used by T411 for video quality terms.
T411_TERM_TYPE_ID_VIDEO_QUALITY = 7
# Maps T411 video-quality term ids to flexget quality objects.
# NOTE(review): several distinct term ids intentionally map to the same
# flexget quality (flexget's scale is coarser than T411's).
T411_VIDEO_QUALITY_MAP = {
    8: qualities.get("bluray"),
    1171: qualities.get("bluray"),
    17: qualities.get("bluray 1080p"),
    1220: qualities.get("remux"),
    13: qualities.get("dvdrip"),
    14: qualities.get("dvdrip"),
    10: qualities.get("dvdrip"),
    1208: qualities.get("bluray 1080p"),
    1218: qualities.get("bluray 720p"),
    16: qualities.get("bluray 1080p"),
    1219: qualities.get("bluray"),
    15: qualities.get("bluray 720p"),
    11: qualities.get("tvrip"),
    1162: qualities.get("hdtv 1080p"),
    12: qualities.get("hdtv 720p"),
    18: qualities.get("ppvrip"),
    1233: qualities.get("webdl"),
    1174: qualities.get("webdl 1080p"),
    1182: qualities.get("webdl"),
    1175: qualities.get("webdl 720p"),
    19: qualities.get("webrip")
}
def auth_required(func):
    """
    Decorator ensuring the REST client is authenticated,
    authenticating on demand before executing the command.

    :param func: T411RestClient method to guard
    :return: wrapped method
    """
    # functools.wraps keeps the wrapped method's __name__/__doc__ intact,
    # so logs and introspection do not all show 'wrapper'.
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.is_authenticated():
            log.debug('None API token. Authenticating with "%s" account...', self.credentials.get('username'))
            self.auth()
            assert self.is_authenticated()
        return func(self, *args, **kwargs)
    return wrapper
class ApiError(Exception):
    """
    Exception raised when the REST client receives a business error
    from the T411 server.

    :ivar code: numeric error code returned by the server
    :ivar description: human-readable error message
    """
    def __init__(self, code, description):
        # Initialize Exception so str(e)/logging carry a useful message
        # instead of an empty string.
        super(ApiError, self).__init__('%s (error code %s)' % (description, code))
        self.description = description
        self.code = code
class T411RestClient(object):
"""A REST client for T411 API"""
@staticmethod
def template_url(url_scheme='http'):
return url_scheme + '://' + T411API_DOMAIN_URL + '%s'
@staticmethod
def download_url(torrent_id, url_scheme='http'):
return (T411RestClient.template_url(url_scheme) % T411API_DOWNLOAD_PATH) + str(torrent_id)
    def __init__(self, username=None, password=None, url_scheme='http'):
        """
        :param username: T411 account name (may be set later on credentials)
        :param password: T411 account password
        :param url_scheme: 'http' or 'https'
        """
        self.credentials = {'username': username, 'password': password}
        # Set after a successful auth(); None means unauthenticated.
        self.api_token = None
        self.api_template_url = url_scheme + '://' + T411API_DOMAIN_URL + '%s'
        self.web_session = Session()
    def auth(self):
        """
        Request server to obtain a api token. Obtained
        token will be set for future usage of the client instance

        NOTE(review): auth failures are only logged, not raised; callers
        detect them via is_authenticated().
        :return:
        """
        auth_url = self.api_template_url % T411API_AUTH_PATH
        response = self.web_session.post(auth_url, self.credentials)
        json_response = response.json()
        error_description = json_response.get('error', None)
        if error_description:
            log.error('%d - %s', json_response.get('code'), error_description)
        else:
            self.set_api_token(json_response.get('token'))
    def set_api_token(self, api_token):
        """
        Set the client for use an api token.
        The token is sent as the 'Authorization' header on every request.
        :param api_token:
        :return:
        """
        self.api_token = api_token
        self.web_session.headers.update({'Authorization': self.api_token})
    def is_authenticated(self):
        """
        :return: True if an api token is set. Note that the client
        doesn't check if the token is valid (expired or wrong).
        """
        return self.api_token is not None
@staticmethod
def raise_on_fail_response(json_response):
"""
This method throw an Exception if server return a
error message
:return:
"""
if json_response is None:
pass
error_name = json_response.get('error', None)
error_code = json_response.get('code', None)
if error_name is not None:
raise ApiError(error_code, error_name)
    def get_json(self, path, params=None):
        """
        Common method for requesting JSON response

        :param path: API path, interpolated into the base URL template
        :param params: optional query-string parameters
        :return: decoded JSON payload
        :raises ApiError: when the server reports a business error
        """
        url = self.api_template_url % path
        request = self.web_session.get(url, params=params)
        try:
            result = request.json()
        except ValueError:
            # The server sometimes prepends non-JSON output; try to decode
            # just the last line of the body before giving up.
            log.debug("Response from %s was not JSON encoded. Attempting deep inspection...", path)
            try:
                last_line = request.text.splitlines()[-1]
                result = json.loads(last_line)
            except (ValueError, IndexError):
                log.warning("Server response doesn't contains any JSON encoded response.")
                raise
        T411RestClient.raise_on_fail_response(result)
        return result
    @auth_required
    def retrieve_category_tree(self):
        """
        Request T411 API for retrieving categories and their
        subcategories.

        :return: decoded JSON category tree
        """
        return self.get_json(T411API_CATEGORY_TREE_PATH)
    @auth_required
    def retrieve_terms_tree(self):
        """
        Request T411 API for retrieving term types
        and terms.

        :return: decoded JSON term-type tree
        """
        return self.get_json(T411API_TERMS_PATH)
    @auth_required
    def search(self, query):
        """
        Search torrent
        :param query: dict
        :param query['expression']: optional free-text, appended to the path
        :param query['category_id']: Int optional
        :param query['result_per_page']: Int optional
        :param query['page_index']: Int optional
        :param query['terms']: (Term type id, Term id,)
        :return dict
        """
        url = T411API_SEARCH_PATH
        if query.get('expression') is not None:
            url += query['expression']
        url_params = {}
        if query.get('category_id') is not None:
            # using cat or cid will do the same result
            # but using cid without query expression will not broke
            # results
            url_params['cid'] = query['category_id']
        if query.get('result_per_page') is not None:
            url_params['limit'] = query['result_per_page']
        if query.get('page_index') is not None:
            url_params['offset'] = query['page_index']
        if query.get('terms') is not None:
            # T411 expects repeated 'term[<type_id>][]' query parameters,
            # one per selected term value.
            for (term_type_id, term_id) in query['terms']:
                term_type_key_param = 'term[' + str(term_type_id) + '][]'
                if url_params.get(term_type_key_param) is None:
                    url_params[term_type_key_param] = []
                url_params[term_type_key_param].append(term_id)
        return self.get_json(url, params=url_params)
    @auth_required
    def details(self, torrent_id):
        """Fetch the detail JSON payload for the given torrent id."""
        url = T411API_DETAILS_PATH + str(torrent_id)
        return self.get_json(url)
class T411ObjectMapper(object):
"""
Tool class to convert JSON object from the REST client
into object for ORM
"""
date_format = "%Y-%m-%d %H:%M:%S"
    def map_category(self, json_category):
        """
        Parse one JSON object of a category (and its subcategories) to Category
        :param json_category: dict
        :return: Category instance, or None for an empty/invalid entry
        """
        # Some categories are empty, so we reject them
        if json_category.get('id') is None \
                or json_category.get('pid') is None \
                or json_category.get('name') is None:
            return None
        mapped_category = Category()
        mapped_category.id = int(json_category.get(u'id'))
        # pid == 0 marks a main (top-level) category.
        pid = int(json_category.get(u'pid'))
        if pid == 0:
            mapped_category.parent_id = None
        else:
            mapped_category.parent_id = pid
        mapped_category.name = json_category.get(u'name')
        json_sub_categories = json_category.get(u'cats')
        if json_sub_categories is not None:
            for json_sub_category in json_sub_categories.values():
                mapped_sub_category = self.map_category(json_sub_category)
                mapped_category.sub_categories.append(mapped_sub_category)
        return mapped_category
    def map_category_tree(self, json_category_tree):
        """
        :param json_category_tree: dict
        :return array of main Category, dict of [Integer, Category]
        """
        indexed_categories = {}
        main_categories = []
        for json_main_category in json_category_tree.values():
            main_category = self.map_category(json_main_category)
            if main_category is not None:
                # Index both the main category and its direct children by id.
                main_categories.append(main_category)
                indexed_categories[main_category.id] = main_category
                for sub_category in main_category.sub_categories:
                    indexed_categories[sub_category.id] = sub_category
        return main_categories, indexed_categories
    @staticmethod
    def map_term_type_tree(json_tree):
        """
        :param json_tree: dict keyed by category id, then by term type id
        :return: (array of tupple, dict of TermType)
        """
        # term type definition can appears multiple times
        category_to_term_type = []  # relations category-term type
        term_types = {}  # term types, indexed by termtype id
        terms = {}  # terms, indexed by id
        for category_key, json_term_types in json_tree.items():
            for term_type_key, term_type_content in json_term_types.items():
                term_type_id = int(term_type_key)
                category_to_term_type.append((int(category_key), term_type_id))
                # if a term type has already parsed
                # then we just record the category-term type relation
                if term_type_id not in term_types:
                    term_type = TermType()
                    term_type.id = term_type_id
                    term_type.name = term_type_content.get('type')
                    term_type.mode = term_type_content.get('mode')
                    term_types[term_type.id] = term_type  # index term type
                    for term_id, term_name in term_type_content.get('terms').items():
                        # Parsing & indexing terms
                        if term_id not in terms:
                            term = Term(id=int(term_id), name=term_name)
                            term_type.terms.append(term)
        return category_to_term_type, term_types
@staticmethod
def map_search_result_entry(json_entry, download_auth=None):
"""
Parse json object of a torrent entry to flexget Entry
:param download_auth: Requests authenticator
"""
result = Entry()
result['t411_torrent_id'] = int(json_entry['id'])
result['title'] = json_entry['name']
result['url'] = T411RestClient.download_url(json_entry['id'])
result['t411_category'] = int(json_entry['category'])
result['seeders'] = int(json_entry['seeders'])
result['leechers'] = int(json_entry['leechers'])
result['t411_comments'] = int(json_entry['comments'])
result['t411_verified'] = json_entry['isVerified'] is '1'
result['t411_pubdate'] = datetime.strptime(json_entry['added'], T411ObjectMapper.date_format)
result['content_size'] = int(json_entry['size']) / (1024 ** 2)
result['t411_times_completed'] = int(json_entry['times_completed'])
result['t411_category_name'] = json_entry['categoryname']
result['t411_category_image'] = json_entry['categoryimage']
result['t411_privacy'] = json_entry['privacy']
result['t411_owner_id'] = int(json_entry['owner'])
result['t411_owner_username'] = json_entry['username']
result['download_auth'] = download_auth
return result
    @staticmethod
    def map_details(json_details, resolver):
        """
        Parse json entry of details of a torrent entry
        to Torrent object.

        :param json_details: dict from the details endpoint
        :param resolver: callable(category_id, term_type_name, term_name)
            returning a Term entity or None
        :return: Torrent
        """
        result = Torrent()
        result.id = json_details.get('id')
        result.name = json_details.get('name')
        result.category_id = json_details.get('category')
        # Parse collection of termtype-termvalue
        for (term_type_name, terms_candidat) in json_details.get('terms').items():
            if isinstance(terms_candidat, list):
                # Some terms type are multi-valuable, eg. Genres
                for term_name in terms_candidat:
                    term_entity = resolver(result.category_id, term_type_name, term_name)
                    if term_entity is not None:
                        result.terms.append(term_entity)
            else:
                term_entity = resolver(result.category_id, term_type_name, terms_candidat)
                if term_entity is not None:
                    result.terms.append(term_entity)
        return result
def cache_required(func):
    """
    Decorator ensuring criteria data is cached in the database,
    triggering a synchronization before the call if it is not.

    :param func: T411Proxy method to guard
    :return: wrapped method
    """
    # functools.wraps keeps the wrapped method's __name__/__doc__ intact,
    # so logs and introspection do not all show 'wrapper'.
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.has_cached_criterias():
            log.debug('None cached data. Synchronizing...')
            self.synchronize_database()
        return func(self, *args, **kwargs)
    return wrapper
class T411Proxy(object):
"""
A T411 proxy service. This proxy interact both with
T411 Rest Client and T411 local database.
"""
    def __init__(self, session=None):
        """
        :param session: flexget.manager.Session
            NOTE(review): accepted but currently unused — confirm whether
            callers still pass it.
        """
        self.rest_client = T411RestClient()
        self.mapper = T411ObjectMapper()
        # Lazily computed by has_cached_criterias(); None means "unknown".
        self.__has_cached_criterias = None
    def __set_credential(self, username=None, password=None, api_token=None):
        # Push the selected account (and any cached token) into the REST
        # client; a None token forces re-authentication on next API call.
        self.rest_client.api_token = api_token
        self.rest_client.credentials = {
            'username': username,
            'password': password
        }
    @with_session
    def set_credential(self, username=None, session=None):
        """
        Set REST client credential from database
        :param username: if set, account's credential will be used.
        :param session: database session (injected by @with_session)
        :raises PluginError: when no matching credential is stored
        :return:
        """
        query = session.query(Credential)
        if username:
            query = query.filter(Credential.username == username)
        credential = query.first()
        if credential is None:
            raise PluginError('You cannot use t411 plugin without credentials. '
                              'Please set credential with "flexget t411 add-auth <username> <password>".')
        self.__set_credential(credential.username, credential.password, credential.api_token)
    @with_session
    def has_cached_criterias(self, session=None):
        """
        :return: True if database contains data of a previous synchronization
        """
        # Result is memoized; synchronize_database() resets the memo.
        if self.__has_cached_criterias is None:
            self.__has_cached_criterias = session.query(Category).count() > 0
        return self.__has_cached_criterias
    @with_session
    def synchronize_database(self, session=None):
        """
        If database has been cleaned, this method
        will update it.
        :return:
        """
        log.debug('T411Proxy start database synchronization with T411')
        category_tree = self.rest_client.retrieve_category_tree()
        term_tree = self.rest_client.retrieve_terms_tree()
        main_categories, indexed_categories = self.mapper.map_category_tree(category_tree)
        category_to_term_type, term_types = self.mapper.map_term_type_tree(term_tree)
        log.debug('%d categories (%d are main categories) and %d term types retrieved',
                  len(indexed_categories),
                  len(main_categories),
                  len(term_types))
        # Wire each term type onto every category it applies to.
        for (category_id, term_type_id) in category_to_term_type:
            category = indexed_categories.get(category_id)
            term_type = term_types.get(term_type_id)
            category.term_types.append(term_type)
        session.add_all(main_categories)
        session.commit()
        # Invalidate the memoized flag so it is recomputed on next check.
        self.__has_cached_criterias = None
@cache_required
@with_session
def find_categories(self, category_name=None, is_sub_category=False, session=None):
query = session.query(Category)
if category_name is not None:
query = query.filter(Category.name == category_name)
if is_sub_category:
query = query.filter(Category.parent_id.isnot(None))
return query.all()
@cache_required
@with_session
def find_term_types(self, category_id=None, term_type_name=None, session=None):
query = session.query(TermType) \
.filter(TermType.name == term_type_name) \
.filter(TermType.categories.any(Category.id == category_id))
return query.one()
@cache_required
@with_session
def find_term_by_name(self, term_type_id, term_name, session=None):
return session.query(Term) \
.filter(Term.type_id == term_type_id) \
.filter(Term.name == term_name) \
.one()
@cache_required
@with_session
def find_term(self, category_id, term_type_name, term_name, session=None):
result = session.query(Term) \
.filter(Term.type.has(TermType.categories.any(Category.id == category_id))) \
.filter(Term.type.has(TermType.name == term_type_name)) \
.filter(Term.name == term_name) \
.first()
return result
@cache_required
@with_session
def main_categories(self, session=None):
query = session.query(Category).filter(Category.parent_id.is_(None))
return query.all()
@cache_required
@with_session
def all_category_names(self, categories_filter='all', session=None):
name_query = session.query(Category.name)
if categories_filter == 'sub':
name_query.filter(Category.parent_id is not None)
elif categories_filter == 'main':
name_query.filter(Category.parent_id is None)
return [name for (name,) in name_query.all()]
@cache_required
@with_session
def all_term_names(self, session=None):
name_query = session.query(Term.name).all()
return [name for (name,) in name_query]
@cache_required
@with_session
def friendly_query_to_client_query(self, friendly_query, session=None):
"""
:param FriendlySearchQuery query:
:return (,)[]: T411RestClient.search compatible
"""
client_query = {'expression': friendly_query.expression}
if friendly_query.category_name is not None:
try:
(category_id,) = session \
.query(Category.id) \
.filter(Category.name == friendly_query.category_name) \
.one()
client_query['category_id'] = category_id
log.debug('Category named "%s" resolved by id %d', friendly_query.category_name, category_id)
if len(friendly_query.term_names) > 0:
log.debug('Resolving terms : %s' % friendly_query.term_names)
or_like = (Term.name.like(friendly_query.term_names[0] + '%'))
for term_name in friendly_query.term_names[1:]:
or_like |= (Term.name.like(term_name + '%'))
client_query['terms'] = session \
.query(Term.type_id, Term.id) \
.filter(or_like) \
.filter(TermType.categories.any(Category.id == category_id)) \
.filter(Term.type_id == TermType.id).all()
except NoResultFound:
log.warning('Unable to resolve category named %s', friendly_query.category_name)
log.warning('Terms filter will be passed')
if friendly_query.max_results is not None:
client_query['result_per_page'] = friendly_query.max_results
client_query['page_index'] = 0
return client_query
def search(self, query):
"""
:param FriendlySearchQuery query:
:return:
"""
client_query = self.friendly_query_to_client_query(query)
json_results = self.rest_client.search(client_query)
json_torrents = json_results.get('torrents', [])
json_not_pending_torrents = [x for x in json_torrents if not isinstance(x, int)]
log.debug("Search produces %d results including %d 'on pending' (the latter will not produces entries)",
len(json_torrents),
len(json_torrents) - len(json_not_pending_torrents))
download_auth = T411BindAuth(self.rest_client.api_token)
map_function = partial(T411ObjectMapper.map_search_result_entry, download_auth=download_auth)
return list(map(map_function, json_not_pending_torrents))
@cache_required
@with_session
def details(self, torrent_id, session=None):
"""
WIP
Download and store torrent details
:param torrent_id:
:return:
"""
details = session \
.query(Torrent) \
.filter(Torrent.id == torrent_id) \
.first()
if details:
return details
else:
log.debug('Torrent %d cache miss. Online retrieving...', torrent_id)
# Cache dismiss, retrieve details via online way
json_details = self.rest_client.details(torrent_id)
def resolver(category_id, term_type_name, term_name):
return self.find_term(category_id, term_type_name, term_name, session=session)
details = self.mapper.map_details(json_details, resolver)
session.add(details)
session.commit()
return details
@with_session
def add_credential(self, username, password, session=None):
"""
Add a credential
:param username: T411 username
:param password: T411 password
:return: False if username still has an entry (password has been updated)
"""
credential = session.query(Credential).filter(Credential.username == username).first()
if credential:
credential.password = password
credential.api_token = None
result = False
else:
credential = Credential(username=username, password=password)
session.add(credential)
result = True
session.commit()
return result
@cache_required
@with_session
def parse_terms_to_quality(self, terms, session=None):
"""
If terms contains a term with the termtype 'video quality'
then this function convert it into a flexget Quality
else it return None
:param terms: Array of Term
:param session:
:return: flexget.utils.Quality
"""
video_quality_description = next((
term for term in terms
if term.get('term_type_id') == T411_TERM_TYPE_ID_VIDEO_QUALITY), None)
if video_quality_description is not None:
video_quality = T411_VIDEO_QUALITY_MAP.get(video_quality_description.get('term_id'))
return video_quality
else:
return None
class T411BindAuth(AuthBase):
    """requests auth hook that stamps each outgoing call with the T411 API token."""

    def __init__(self, api_token):
        self.api_token = api_token

    def __call__(self, request):
        # T411 expects the bare token in the (lower-cased) authorization header.
        request.headers['authorization'] = self.api_token
        return request
@event('manager.db_cleanup')
def db_cleanup(manager, session):
    # Drop every cached T411 criteria table; the next proxy use will trigger
    # a fresh synchronization.
    for cached_model in (Category, TermType, Term):
        session.query(cached_model).delete(synchronize_session=False)
| {
"content_hash": "0e8af7bd593c0c75c3dc61c6f3e26a0b",
"timestamp": "",
"source": "github",
"line_count": 752,
"max_line_length": 112,
"avg_line_length": 36.4813829787234,
"alnum_prop": 0.600058321790479,
"repo_name": "qvazzler/Flexget",
"id": "5a098c40e1e627f179cb9eb381fd9449f1760815",
"size": "27434",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flexget/plugins/api_t411.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5275"
},
{
"name": "HTML",
"bytes": "33930"
},
{
"name": "JavaScript",
"bytes": "58811"
},
{
"name": "Python",
"bytes": "2428468"
}
],
"symlink_target": ""
} |
from mrjob.job import MRJob
from mrjob.protocol import JSONValueProtocol
import json
def bbox_contains(bbox, longitude, latitude):
    """Return True iff the point lies inside (or on the edge of) *bbox*.

    *bbox* is a (sw_lon, sw_lat, ne_lon, ne_lat) 4-tuple, GeoJSON style.
    """
    west, south, east, north = bbox
    if not (west <= longitude <= east):
        return False
    return south <= latitude <= north
class GeoExtract(MRJob):
    """MapReduce job tagging geo-located tweets with the name of the
    container feature (from a .geojson FeatureCollection) whose bounding
    box contains them."""

    INPUT_PROTOCOL = JSONValueProtocol

    def configure_options(self):
        super(GeoExtract, self).configure_options()
        # add_file_option ships the file to every task node, see
        # http://mrjob.readthedocs.org/en/latest/guides/writing-mrjobs.html
        self.add_file_option('--containers', help='.geojson feature collection to filter for')

    def mapper_init(self):
        # Loaded once per mapper task, reused for every input record.
        with open(self.options.containers) as handle:
            self.feature_collection = json.load(handle)

    def mapper(self, _, line):
        """Yield (container name, tweet) for each geo-tagged tweet that
        falls inside a container's bounding box."""
        # Ignore Gnip replay status / report records.
        if 'info' in line and line['info']['message'] == 'Replay Request Completed':
            return
        if 'geo' in line and line['geo'].get('type') == 'Point':
            # NOTE(review): assumes the `geo` field is [latitude, longitude]
            # (reversed vs. GeoJSON order) -- confirm against the data source.
            latitude, longitude = line['geo']['coordinates']
            for container in self.feature_collection['features']:
                if bbox_contains(container['bbox'], longitude, latitude):
                    yield container['properties']['name'], line
if __name__ == '__main__':
    # NOTE: consider wrapping the run in try/except/finally with mrjob
    # counters for error logging -- easier to debug than pulling down the
    # whole bucket of attempts and browsing stderr files for tracebacks.
    # http://pythonhosted.org/mrjob/guides/writing-mrjobs.html#counters
    GeoExtract.run()
| {
"content_hash": "6148e254906c36e15fa1788130917182",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 94,
"avg_line_length": 40.41463414634146,
"alnum_prop": 0.6499698249849125,
"repo_name": "dssg/tweedr",
"id": "3a849d8dc106b1cca150b693999833d4eb4233c4",
"size": "1657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweedr/emr/gnip_geo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "862"
},
{
"name": "JavaScript",
"bytes": "190761"
},
{
"name": "PHP",
"bytes": "47623"
},
{
"name": "Python",
"bytes": "132011"
},
{
"name": "R",
"bytes": "788"
}
],
"symlink_target": ""
} |
import unittest
import sys, os, glob
# Run all tests from the test directory, with the project root importable.
test_root = os.path.dirname(os.path.abspath(__file__))
os.chdir(test_root)
sys.path.insert(0, os.path.dirname(test_root))
sys.path.insert(0, test_root)
if len(sys.argv) == 2:
    # `python testall.py foo` runs only test_foo.py
    test_names = ["test_%s" % sys.argv[1]]
else:
    # Discover every test_*.py module in this directory.
    # (Previously the glob also ran unconditionally above, duplicating work.)
    test_files = glob.glob(os.path.join(test_root, "test_*.py"))
    test_names = [os.path.basename(name)[:-3] for name in test_files]
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
def run():
    """Run the whole suite and exit non-zero when anything fails."""
    import macaron  # imported late so the sys.path setup above takes effect
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Replaces the fragile `x and 1 or 0` ternary emulation with a real
    # conditional expression (same result: 1 on any error/failure, else 0).
    sys.exit(1 if (result.errors or result.failures) else 0)
# Allow running the suite directly: `python testall.py [suite_name]`.
if __name__ == '__main__':
    run()
| {
"content_hash": "fbfdf4e0453532b427bc1103e43c443e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 28.68,
"alnum_prop": 0.6736401673640168,
"repo_name": "nobrin/macaron",
"id": "2c136cdcbdc529d31198006b543ecdc5de42e694",
"size": "775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/testall.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118154"
}
],
"symlink_target": ""
} |
from qingcloud.cli.iaas_client.actions import job
from qingcloud.cli.iaas_client.actions import instance
from qingcloud.cli.iaas_client.actions import instance_groups
from qingcloud.cli.iaas_client.actions import volume
from qingcloud.cli.iaas_client.actions import nic
from qingcloud.cli.iaas_client.actions import eip
from qingcloud.cli.iaas_client.actions import image
from qingcloud.cli.iaas_client.actions import keypair
from qingcloud.cli.iaas_client.actions import router
from qingcloud.cli.iaas_client.actions import sg
from qingcloud.cli.iaas_client.actions import vxnet
from qingcloud.cli.iaas_client.actions import lb
from qingcloud.cli.iaas_client.actions import server_certificate
from qingcloud.cli.iaas_client.actions import monitor
from qingcloud.cli.iaas_client.actions import snapshot
from qingcloud.cli.iaas_client.actions import dns_alias
from qingcloud.cli.iaas_client.actions import tag
from qingcloud.cli.iaas_client.actions import notification
from qingcloud.cli.iaas_client.actions import s2
from qingcloud.cli.iaas_client.actions import alarm_policy
from qingcloud.cli.iaas_client.actions import billing
from qingcloud.cli.iaas_client.actions import collaboration
from qingcloud.cli.iaas_client.actions import sdwan
from qingcloud.cli.iaas_client.actions import cluster
class ActionManager(object):
    """Registry mapping CLI action names (e.g. 'describe-instances') to the
    action classes that implement them."""

    @classmethod
    def get_action(cls, action):
        """Return the action class registered under *action*, or None."""
        return cls.action_table.get(action)

    @classmethod
    def get_valid_actions(cls):
        """Return every registered action name, sorted alphabetically."""
        return sorted(ActionManager.action_table.keys())

    # Lookup table: CLI action name -> action class implementing it.
    action_table = {
        ## notification ##
        'describe-notification-center-user-posts': notification.DescribeNotificationCenterUserPostsAction,
        'create-notification-items': notification.CreateNotificationItemsAction,
        'create-notification-list': notification.CreateNotificationListAction,
        'delete-notification-items': notification.DeleteNotificationItemsAction,
        'delete-notification-lists': notification.DeleteNotificationListsAction,
        'describe-notification-items': notification.DescribeNotificationItemsAction,
        'describe-notification-lists': notification.DescribeNotificationListsAction,
        'modify-notification-list-attributes': notification.ModifyNotificationListAttributesAction,
        'verify-notification-item': notification.VerifyNotificationItemAction,
        ## job ##
        'describe-jobs': job.DescribeJobsAction,
        ## instance ##
        'run-instances': instance.RunInstancesAction,
        'modify-instance-attributes': instance.ModifyInstanceAttributesAction,
        'reset-instances': instance.ResetInstancesAction,
        'resize-instances': instance.ResizeInstancesAction,
        'describe-instances': instance.DescribeInstancesAction,
        'restart-instances': instance.RestartInstancesAction,
        'start-instances': instance.StartInstancesAction,
        'stop-instances': instance.StopInstancesAction,
        'terminate-instances': instance.TerminateInstancesAction,
        'clone-instances': instance.CloneInstancesAction,
        ## instance groups ##
        'create-instance-groups': instance_groups.CreateInstanceGroupsAction,
        'delete-instance-groups': instance_groups.DeleteInstanceGroupsAction,
        'join-instance-group': instance_groups.JoinInstanceGroupAction,
        'leave-instance-group': instance_groups.LeaveInstanceGroupAction,
        'describe-instance-groups': instance_groups.DescribeInstanceGroupsAction,
        ## volume ##
        'clone-volumes': volume.CloneVolumesAction,
        'create-volumes': volume.CreateVolumesAction,
        'modify-volume-attributes': volume.ModifyVolumeAttributesAction,
        'describe-volumes': volume.DescribeVolumesAction,
        'attach-volumes': volume.AttachVolumesAction,
        'detach-volumes': volume.DetachVolumesAction,
        'delete-volumes': volume.DeleteVolumesAction,
        'resize-volumes': volume.ResizeVolumesAction,
        ## nic ##
        'create-nics': nic.CreateNicsAction,
        'modify-nic-attributes': nic.ModifyNicAttributesAction,
        'describe-nics': nic.DescribeNicsAction,
        'attach-nics': nic.AttachNicsAction,
        'detach-nics': nic.DetachNicsAction,
        'delete-nics': nic.DeleteNicsAction,
        ## eip ##
        'describe-eips': eip.DescribeEipsAction,
        'allocate-eips': eip.AllocateEipsAction,
        'release-eips': eip.ReleaseEipsAction,
        'modify-eip-attributes': eip.ModifyEipAttributesAction,
        'associate-eip': eip.AssociateEipAction,
        'dissociate-eips': eip.DissociateEipsAction,
        'change-eips-bandwidth': eip.ChangeEipsBandwidthAction,
        'change-eips-billing-mode': eip.ChangeEipsBillingModeAction,
        ## sg ##
        'create-security-group': sg.CreateSecurityGroupAction,
        'describe-security-groups': sg.DescribeSecurityGroupsAction,
        'modify-security-group-attributes': sg.ModifySecurityGroupAttributesAction,
        'apply-security-group': sg.ApplySecurityGroupAction,
        'delete-security-groups': sg.DeleteSecurityGroupsAction,
        'describe-security-group-rules': sg.DescribeSecurityGroupRulesAction,
        'modify-security-group-rule-attributes': sg.ModifySecurityGroupRuleAttributesAction,
        'add-security-group-rules': sg.AddSecurityGroupRulesAction,
        'delete-security-group-rules': sg.DeleteSecurityGroupRulesAction,
        'describe-security-group-ipsets': sg.DescribeSecurityGroupIPSetsAction,
        'modify-security-group-ipset-attributes': sg.ModifySecurityGroupIPSetAttributesAction,
        'create-security-group-ipset': sg.CreateSecurityGroupIPSetAction,
        'delete-security-group-ipsets': sg.DeleteSecurityGroupIPSetsAction,
        ## keypair ##
        'create-keypair': keypair.CreateKeyPairAction,
        'describe-keypairs': keypair.DescribeKeyPairsAction,
        'attach-keypairs': keypair.AttachKeyPairsAction,
        'detach-keypairs': keypair.DetachKeyPairsAction,
        'modify-keypair-attributes': keypair.ModifyKeyPairAttributesAction,
        'delete-keypairs': keypair.DeleteKeyPairsAction,
        ## vxnet ##
        'create-vxnets': vxnet.CreateVxnetsAction,
        'describe-vxnet-instances': vxnet.DescribeVxnetInstancesAction,
        'describe-vxnets': vxnet.DescribeVxnetsAction,
        'join-vxnet': vxnet.JoinVxnetAction,
        'leave-vxnet': vxnet.LeaveVxnetAction,
        'modify-vxnet-attributes': vxnet.ModifyVxnetAttributesAction,
        'delete-vxnets': vxnet.DeleteVxnetsAction,
        ## router ##
        'add-router-statics': router.AddRouterStaticsAction,
        'add-router-static-entries': router.AddRouterStaticEntriesAction,
        'join-router': router.JoinRouterAction,
        'leave-router': router.LeaveRouterAction,
        'create-routers': router.CreateRoutersAction,
        'modify-router-attributes': router.ModifyRouterAttributesAction,
        'modify-router-static-attributes': router.ModifyRouterStaticAttributesAction,
        'modify-router-static-entry-attributes': router.ModifyRouterStaticEntryAttributesAction,
        'delete-router-statics': router.DeleteRouterStaticsAction,
        'delete-router-static-entries': router.DeleteRouterStaticEntriesAction,
        'delete-routers': router.DeleteRoutersAction,
        'poweroff-routers': router.PowerOffRoutersAction,
        'poweron-routers': router.PowerOnRoutersAction,
        'describe-routers': router.DescribeRoutersAction,
        'describe-router-statics': router.DescribeRouterStaticsAction,
        'describe-router-static-entries': router.DescribeRouterStaticEntriesAction,
        'describe-router-vxnets': router.DescribeRouterVxnetsAction,
        'update-routers': router.UpdateRoutersAction,
        ## image ##
        'describe-images': image.DescribeImagesAction,
        'modify-image-attributes': image.ModifyImageAttributesAction,
        'capture-instance': image.CaptureInstanceAction,
        'delete-images': image.DeleteImagesAction,
        ## load balancer ##
        'add-loadbalancer-backends': lb.AddLoadBalancerBackendsAction,
        'add-loadbalancer-listeners': lb.AddLoadBalancerListenersAction,
        'associate-eips-to-loadbalancer': lb.AssociateEipsToLoadBalancerAction,
        'add-loadbalancer-policy-rules': lb.AddLoadBalancerPolicyRulesAction,
        'create-loadbalancers': lb.CreateLoadBalancerAction,
        'delete-loadbalancer-backends': lb.DeleteLoadBalancerBackendsAction,
        'delete-loadbalancer-listeners': lb.DeleteLoadBalancerListenersAction,
        'delete-loadbalancers': lb.DeleteLoadBalancersAction,
        'describe-loadbalancer-backends': lb.DescribeLoadBalancerBackendsAction,
        'describe-loadbalancer-listeners': lb.DescribeLoadBalancerListenersAction,
        'describe-loadbalancers': lb.DescribeLoadBalancersAction,
        'dissociate-eips-from-loadbalancer': lb.DissociateEipsFromLoadBalancerAction,
        'modify-loadbalancer-attributes': lb.ModifyLoadBalancerAttributesAction,
        'modify-loadbalancer-backend-attributes': lb.ModifyLoadBalancerBackendAttributesAction,
        'modify-loadbalancer-listener-attributes': lb.ModifyLoadBalancerListenerAttributessAction,
        'start-loadbalancers': lb.StartLoadBalancersAction,
        'stop-loadbalancers': lb.StopLoadBalancersAction,
        'update-loadbalancers': lb.UpdateLoadBalancersAction,
        ## server certificate
        'describe-server-certificates': server_certificate.DescribeServerCertificatesAction,
        'create-server-certificate': server_certificate.CreateServerCertificateAction,
        'delete-server-certificates': server_certificate.DeleteServerCertificatesAction,
        'modify-server-certificate-attributes': server_certificate.ModifyServerCertificateAttributesAction,
        ## monitor ##
        'get-monitoring-data': monitor.GetMonitorAction,
        'get-loadbalancer-monitoring-data': monitor.GetLoadBalancerMonitorAction,
        ## snapshot ##
        'describe-snapshots': snapshot.DescribeSnapshotsAction,
        'create-snapshots': snapshot.CreateSnapshotsAction,
        'delete-snapshots': snapshot.DeleteSnapshotsAction,
        'apply-snapshots': snapshot.ApplySnapshotsAction,
        'modify-snapshot-attributes': snapshot.ModifySnapshotAttributesAction,
        'capture-instance-from-snapshot': snapshot.CaptureInstanceFromSnapshotAction,
        'create-volume-from-snapshot': snapshot.CreateVolumeFromSnapshotAction,
        ## dns alias ##
        'describe-dns-aliases': dns_alias.DescribeDNSAliasesAction,
        'associate-dns-alias': dns_alias.AssociateDNSAliasAction,
        'dissociate-dns-aliases': dns_alias.DissociateDNSAliasesAction,
        'get-dns-label': dns_alias.GetDNSLabelAction,
        ## tag ##
        'create-tag': tag.CreateTagAction,
        'describe-tags': tag.DescribeTagsAction,
        'attach-tags': tag.AttachTagsAction,
        'detach-tags': tag.DetachTagsAction,
        'modify-tag-attributes': tag.ModifyTagAttributesAction,
        'delete-tags': tag.DeleteTagsAction,
        ## S2 ##
        'create-s2-server': s2.CreateS2ServerAction,
        'describe-s2-servers': s2.DescribeS2ServersAction,
        'modify-s2-server': s2.ModifyS2ServerAttributesAction,
        'resize-s2-servers': s2.ResizeS2ServersAction,
        'delete-s2-servers': s2.DeleteS2ServersAction,
        'poweron-s2-servers': s2.PowerOnS2ServersAction,
        'poweroff-s2-servers': s2.PowerOffS2ServersAction,
        'update-s2-servers': s2.UpdateS2ServersAction,
        'change-s2-server-vxnet': s2.ChangeS2ServerVxnetAction,
        'create-s2-shared-target': s2.CreateS2SharedTargetAction,
        'describe-s2-shared-targets': s2.DescribeS2SharedTargetsAction,
        'delete-s2-shared-targets': s2.DeleteS2SharedTargetsAction,
        'enable-s2-shared-targets': s2.EnableS2SharedTargetsAction,
        'disable-s2-shared-targets': s2.DisableS2SharedTargetsAction,
        'modify-s2-shared-target-attributes': s2.ModifyS2SharedTargetAttributesAction,
        'attach-to-s2-shared-target': s2.AttachToS2SharedTargetAction,
        'detach-from-s2-shared-target': s2.DetachFromS2SharedTargetAction,
        'describe-s2-default-parameters': s2.DescribeS2DefaultParametersAction,
        'create-s2-group': s2.CreateS2GroupAction,
        'describe-s2-groups': s2.DescribeS2GroupsAction,
        'modify-s2-group': s2.ModifyS2GroupAction,
        'delete-s2-group': s2.DeleteS2GroupsAction,
        'create-s2-account': s2.CreateS2AccountAction,
        'describe-s2-accounts': s2.DescribeS2AccountsAction,
        'modify-s2-account': s2.ModifyS2AccountAction,
        'delete-s2-accounts': s2.DeleteS2AccountsAction,
        'associate-s2-account-group': s2.AssociateS2AccountGroupAction,
        'dissociate-s2-account-group': s2.DissociateS2AccountGroupAction,
        ## alarm policy ##
        'describe-alarm-policies': alarm_policy.DescribeAlarmPoliciesAction,
        'add-alarm-policy-actions': alarm_policy.AddAlarmPolicyActionsAction,
        'add-alarm-policy-rules': alarm_policy.AddAlarmPolicyRulesAction,
        'apply-alarm-policy': alarm_policy.ApplyAlarmPolicyAction,
        'associate-alarm-policy': alarm_policy.AssociateAlarmPolicyAction,
        'create-alarm-policy': alarm_policy.CreateAlarmPolicyAction,
        'delete-alarm-policies': alarm_policy.DeleteAlarmPoliciesAction,
        'delete-alarm-policy-actions': alarm_policy.DeleteAlarmPolicyActionsAction,
        'delete-alarm-policy-rules': alarm_policy.DeleteAlarmPolicyRulesAction,
        'describe-alarm-history': alarm_policy.DescribeAlarmHistoryAction,
        'describe-alarm-policy-actions': alarm_policy.DescribeAlarmPolicyActionsAction,
        'describe-alarm-policy-rules': alarm_policy.DescribeAlarmPolicyRulesAction,
        'describe-alarms': alarm_policy.DescribeAlarmsAction,
        'dissociate-alarm-policy': alarm_policy.DissociateAlarmPolicyAction,
        'modify-alarm-policy-action-attributes': alarm_policy.ModifyAlarmPolicyActionAttributesAction,
        'modify-alarm-policy-attributes': alarm_policy.ModifyAlarmPolicyAttributesAction,
        'modify-alarm-policy-rule-attributes': alarm_policy.ModifyAlarmPolicyRuleAttributesAction,
        ## billing ##
        'get-balance': billing.GetBalanceAction,
        'get-lease-info': billing.GetLeaseInfoAction,
        ## collaboration ##
        'add-group-role-rules': collaboration.AddGroupRoleRulesAction,
        'add-resource-group-items': collaboration.AddResourceGroupItemsAction,
        'add-user-group-members': collaboration.AddUserGroupMembersAction,
        'create-group-roles': collaboration.CreateGroupRolesAction,
        'create-resource-groups': collaboration.CreateResourceGroupsAction,
        'create-user-groups': collaboration.CreateUserGroupsAction,
        'delete-group-role-rules': collaboration.DeleteGroupRoleRulesAction,
        'delete-group-roles': collaboration.DeleteGroupRolesAction,
        'delete-resource-group-items': collaboration.DeleteResourceGroupItemsAction,
        'delete-resource-groups': collaboration.DeleteResourceGroupsAction,
        'delete-user-group-members': collaboration.DeleteUserGroupMembersAction,
        'delete-user-groups': collaboration.DeleteUserGroupsAction,
        'describe-group-role-rules': collaboration.DescribeGroupRoleRulesAction,
        'describe-group-roles': collaboration.DescribeGroupRolesAction,
        'describe-resource-group-items': collaboration.DescribeResourceGroupItemsAction,
        'describe-resource-groups': collaboration.DescribeResourceGroupsAction,
        'describe-resource-user-groups': collaboration.DescribeResourceUserGroupsAction,
        'describe-shared-resource-groups': collaboration.DescribeSharedResourceGroupsAction,
        'describe-user-group-members': collaboration.DescribeUserGroupMembersAction,
        'describe-user-groups': collaboration.DescribeUserGroupsAction,
        'grant-resource-groups-to-user-groups': collaboration.GrantResourceGroupsToUserGroupsAction,
        'modify-group-role-attributes': collaboration.ModifyGroupRoleAttributesAction,
        'modify-group-role-rule-attributes': collaboration.ModifyGroupRoleRuleAttributesAction,
        'modify-resource-group-attributes': collaboration.ModifyResourceGroupAttributesAction,
        'modify-user-group-attributes': collaboration.ModifyUserGroupAttributesAction,
        'modify-user-group-member-attributes': collaboration.ModifyUserGroupMemberAttributesAction,
        'revoke-resource-groups-from-user-groups': collaboration.RevokeResourceGroupsFromUserGroupsAction,
        ## sdwan ##
        'describe-wan-accesss': sdwan.DescribeWanAccesssAction,
        'change-wan-access-bandwidth': sdwan.ChangeWanAccessBandwidthAction,
        'upgrade-wan-access': sdwan.UpgradeWanAccessAction,
        'get-wan-monitor': sdwan.GetWanMonitorAction,
        'get-wan-info': sdwan.GetWanInfoAction,
        ## cluster ##
        'deploy-app-version': cluster.DeployAppVersionAction,
    }
| {
"content_hash": "fd2e1b1ec54ab557e0403f85a55b56d2",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 111,
"avg_line_length": 59.730897009966775,
"alnum_prop": 0.7018744090327604,
"repo_name": "yunify/qingcloud-cli",
"id": "5b7a435cab9a54072af4bb67003bb4852e7ad48c",
"size": "18812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qingcloud/cli/iaas_client/actions/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "852"
},
{
"name": "Python",
"bytes": "607642"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PolicyOperations(object):
"""PolicyOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.security.attestation._generated.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # Serializer for URLs/headers/bodies; deserializer for responses.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service configuration; provides instance_url for URL formatting.
        self._config = config
def get(
self,
attestation_type, # type: Union[str, "_models.AttestationType"]
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyResponse"
"""Retrieves the current policy for an attestation type.
Retrieves the current policy for an attestation type.
:param attestation_type: Specifies the trusted execution environment to be used to validate the
evidence.
:type attestation_type: str or ~azure.security.attestation._generated.models.AttestationType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyResponse, or the result of cls(response)
:rtype: ~azure.security.attestation._generated.models.PolicyResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'instanceUrl': self._serialize.url("self._config.instance_url", self._config.instance_url, 'str', skip_quote=True),
'attestationType': self._serialize.url("attestation_type", attestation_type, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('PolicyResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/policies/{attestationType}'} # type: ignore
def set(
self,
attestation_type, # type: Union[str, "_models.AttestationType"]
new_attestation_policy, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyResponse"
"""Sets the policy for a given attestation type.
Sets the policy for a given attestation type.
:param attestation_type: Specifies the trusted execution environment to be used to validate the
evidence.
:type attestation_type: str or ~azure.security.attestation._generated.models.AttestationType
:param new_attestation_policy: JWT Expressing the new policy whose body is a
StoredAttestationPolicy object.
:type new_attestation_policy: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyResponse, or the result of cls(response)
:rtype: ~azure.security.attestation._generated.models.PolicyResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01"
content_type = kwargs.pop("content_type", "text/plain")
accept = "application/json"
# Construct URL
url = self.set.metadata['url'] # type: ignore
path_format_arguments = {
'instanceUrl': self._serialize.url("self._config.instance_url", self._config.instance_url, 'str', skip_quote=True),
'attestationType': self._serialize.url("attestation_type", attestation_type, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(new_attestation_policy, 'str')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('PolicyResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set.metadata = {'url': '/policies/{attestationType}'} # type: ignore
def reset(
    self,
    attestation_type,  # type: Union[str, "_models.AttestationType"]
    policy_jws,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.PolicyResponse"
    """Resets the attestation policy for the specified tenant and reverts to the default policy.

    Resets the attestation policy for the specified tenant and reverts to the default policy.

    :param attestation_type: Specifies the trusted execution environment to be used to validate the
     evidence.
    :type attestation_type: str or ~azure.security.attestation._generated.models.AttestationType
    :param policy_jws: JSON Web Signature with an empty policy document.
    :type policy_jws: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PolicyResponse, or the result of cls(response)
    :rtype: ~azure.security.attestation._generated.models.PolicyResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # NOTE(review): this looks like autorest-generated client code; changes are
    # normally made in the codegen config rather than by hand — confirm.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyResponse"]
    # HTTP status codes mapped to the azure-core exception raised for them.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-10-01"
    # The policy JWS travels as a plain-text body; the service answers JSON.
    content_type = kwargs.pop("content_type", "text/plain")
    accept = "application/json"

    # Construct URL
    url = self.reset.metadata['url']  # type: ignore
    path_format_arguments = {
        'instanceUrl': self._serialize.url("self._config.instance_url", self._config.instance_url, 'str', skip_quote=True),
        'attestationType': self._serialize.url("attestation_type", attestation_type, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(policy_jws, 'str')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is treated as success; other codes map to azure-core errors.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.CloudError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('PolicyResponse', pipeline_response)

    if cls:
        # Caller-supplied hook also receives the raw pipeline response.
        return cls(pipeline_response, deserialized, {})

    return deserialized
reset.metadata = {'url': '/policies/{attestationType}:reset'}  # type: ignore
| {
"content_hash": "6dd42afb589cd75bcb6a43cb780c01a9",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 133,
"avg_line_length": 46.675324675324674,
"alnum_prop": 0.6597106288258208,
"repo_name": "Azure/azure-sdk-for-python",
"id": "9092ecfc1ab3adf76378b0e2e7e8219ac71a6d54",
"size": "11249",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/attestation/azure-security-attestation/azure/security/attestation/_generated/operations/_policy_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from PyQt4 import QtCore, QtGui

# Compatibility shims for the different PyQt4 API versions: on API v2
# QString/UnicodeUTF8 are gone, so fall back to plain-string passthroughs.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API v2: strings are already unicode; return unchanged.
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        # Older PyQt4: translate() takes an explicit encoding argument.
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer PyQt4: no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Settings(object):
    """Layout class for the Settings dialog.

    NOTE(review): naming/structure looks like pyuic4-generated code —
    presumably regenerated from a Qt Designer .ui file; confirm before
    hand-editing.
    """

    def setupUi(self, Settings):
        """Create all widgets and lay them out on the ``Settings`` dialog."""
        Settings.setObjectName(_fromUtf8("Settings"))
        Settings.resize(546, 502)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/icons/settings.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Settings.setWindowIcon(icon)
        self.gridLayout = QtGui.QGridLayout(Settings)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # "DUT settings" group: log location and tmp-files location line edits.
        self.groupBox_5 = QtGui.QGroupBox(Settings)
        self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
        self.gridLayout_6 = QtGui.QGridLayout(self.groupBox_5)
        self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
        self.label_8 = QtGui.QLabel(self.groupBox_5)
        self.label_8.setObjectName(_fromUtf8("label_8"))
        self.gridLayout_6.addWidget(self.label_8, 0, 0, 1, 1)
        spacerItem = QtGui.QSpacerItem(194, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_6.addItem(spacerItem, 0, 1, 2, 1)
        self.dutTmpFilesLocationEdit = QtGui.QLineEdit(self.groupBox_5)
        self.dutTmpFilesLocationEdit.setObjectName(_fromUtf8("dutTmpFilesLocationEdit"))
        self.gridLayout_6.addWidget(self.dutTmpFilesLocationEdit, 1, 2, 2, 1)
        self.label_9 = QtGui.QLabel(self.groupBox_5)
        self.label_9.setObjectName(_fromUtf8("label_9"))
        self.gridLayout_6.addWidget(self.label_9, 2, 0, 1, 1)
        spacerItem1 = QtGui.QSpacerItem(194, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_6.addItem(spacerItem1, 2, 1, 1, 1)
        self.dutLogLocationEdit = QtGui.QLineEdit(self.groupBox_5)
        self.dutLogLocationEdit.setObjectName(_fromUtf8("dutLogLocationEdit"))
        self.gridLayout_6.addWidget(self.dutLogLocationEdit, 0, 2, 1, 1)
        self.gridLayout.addWidget(self.groupBox_5, 5, 0, 1, 3)
        # "logging settings" group: log file path plus file/stream level combos.
        self.groupBox = QtGui.QGroupBox(Settings)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.label_2 = QtGui.QLabel(self.groupBox)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 1)
        self.label_3 = QtGui.QLabel(self.groupBox)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout_2.addWidget(self.label_3, 1, 0, 1, 1)
        self.loggingFileLevel = QtGui.QComboBox(self.groupBox)
        self.loggingFileLevel.setObjectName(_fromUtf8("loggingFileLevel"))
        self.gridLayout_2.addWidget(self.loggingFileLevel, 1, 3, 1, 1)
        self.label_4 = QtGui.QLabel(self.groupBox)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout_2.addWidget(self.label_4, 2, 0, 1, 1)
        self.loggingStreamLevel = QtGui.QComboBox(self.groupBox)
        self.loggingStreamLevel.setObjectName(_fromUtf8("loggingStreamLevel"))
        self.gridLayout_2.addWidget(self.loggingStreamLevel, 2, 3, 1, 1)
        spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem2, 2, 1, 1, 1)
        spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem3, 1, 1, 1, 1)
        spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem4, 1, 2, 1, 1)
        spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem5, 2, 2, 1, 1)
        self.loggingFileEdit = QtGui.QLineEdit(self.groupBox)
        self.loggingFileEdit.setObjectName(_fromUtf8("loggingFileEdit"))
        self.gridLayout_2.addWidget(self.loggingFileEdit, 0, 2, 1, 2)
        spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem6, 0, 1, 1, 1)
        self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 3)
        # "Workspace settings" group: single location line edit.
        self.groupBox_3 = QtGui.QGroupBox(Settings)
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.gridLayout_4 = QtGui.QGridLayout(self.groupBox_3)
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        spacerItem7 = QtGui.QSpacerItem(199, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_4.addItem(spacerItem7, 0, 1, 1, 1)
        self.WorkspaceLocation = QtGui.QLineEdit(self.groupBox_3)
        self.WorkspaceLocation.setObjectName(_fromUtf8("WorkspaceLocation"))
        self.gridLayout_4.addWidget(self.WorkspaceLocation, 0, 2, 1, 1)
        self.label_5 = QtGui.QLabel(self.groupBox_3)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.gridLayout_4.addWidget(self.label_5, 0, 0, 1, 1)
        self.gridLayout.addWidget(self.groupBox_3, 3, 0, 1, 3)
        # "STAF settings" group: STAF install dir line edit.
        self.groupBox_2 = QtGui.QGroupBox(Settings)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_2)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.label = QtGui.QLabel(self.groupBox_2)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout_3.addWidget(self.label, 0, 0, 1, 1)
        spacerItem8 = QtGui.QSpacerItem(226, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_3.addItem(spacerItem8, 0, 1, 1, 1)
        self.STAFDirEdit = QtGui.QLineEdit(self.groupBox_2)
        self.STAFDirEdit.setText(_fromUtf8(""))
        self.STAFDirEdit.setObjectName(_fromUtf8("STAFDirEdit"))
        self.gridLayout_3.addWidget(self.STAFDirEdit, 0, 2, 1, 1)
        self.gridLayout.addWidget(self.groupBox_2, 2, 0, 1, 3)
        # "Tool settings" group: tool location and configure-file line edits.
        self.groupBox_4 = QtGui.QGroupBox(Settings)
        self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
        self.gridLayout_5 = QtGui.QGridLayout(self.groupBox_4)
        self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
        self.toolLocationEdit = QtGui.QLineEdit(self.groupBox_4)
        self.toolLocationEdit.setObjectName(_fromUtf8("toolLocationEdit"))
        self.gridLayout_5.addWidget(self.toolLocationEdit, 0, 2, 1, 1)
        self.label_6 = QtGui.QLabel(self.groupBox_4)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.gridLayout_5.addWidget(self.label_6, 0, 0, 1, 1)
        self.toolConfigureFileEdit = QtGui.QLineEdit(self.groupBox_4)
        self.toolConfigureFileEdit.setObjectName(_fromUtf8("toolConfigureFileEdit"))
        self.gridLayout_5.addWidget(self.toolConfigureFileEdit, 1, 2, 3, 1)
        spacerItem9 = QtGui.QSpacerItem(150, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_5.addItem(spacerItem9, 0, 1, 1, 1)
        spacerItem10 = QtGui.QSpacerItem(150, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_5.addItem(spacerItem10, 2, 1, 1, 1)
        self.label_7 = QtGui.QLabel(self.groupBox_4)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.gridLayout_5.addWidget(self.label_7, 2, 0, 1, 1)
        self.gridLayout.addWidget(self.groupBox_4, 4, 0, 1, 3)
        # Dialog-level OK/Cancel buttons plus a "restore defaults" push button.
        self.buttonBox = QtGui.QDialogButtonBox(Settings)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.gridLayout.addWidget(self.buttonBox, 7, 0, 1, 2)
        self.restoreButton = QtGui.QPushButton(Settings)
        self.restoreButton.setObjectName(_fromUtf8("restoreButton"))
        self.gridLayout.addWidget(self.restoreButton, 6, 2, 1, 1)

        self.retranslateUi(Settings)
        # Wire OK/Cancel to the dialog's accept/reject slots.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Settings.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Settings.reject)
        QtCore.QMetaObject.connectSlotsByName(Settings)

    def retranslateUi(self, Settings):
        """Set all user-visible (translatable) strings on the widgets."""
        Settings.setWindowTitle(_translate("Settings", "Settings", None))
        self.groupBox_5.setTitle(_translate("Settings", "DUT settings", None))
        self.label_8.setText(_translate("Settings", "DUT log location", None))
        self.label_9.setText(_translate("Settings", "DUT tmp files location", None))
        self.groupBox.setTitle(_translate("Settings", "logging settings", None))
        self.label_2.setText(_translate("Settings", "logging file", None))
        self.label_3.setText(_translate("Settings", "logging file level", None))
        self.label_4.setText(_translate("Settings", "logging stream level", None))
        self.groupBox_3.setTitle(_translate("Settings", "Workspace settings", None))
        self.label_5.setText(_translate("Settings", "Workspace location", None))
        self.groupBox_2.setTitle(_translate("Settings", "STAF settings", None))
        self.label.setText(_translate("Settings", "STAF dir", None))
        self.groupBox_4.setTitle(_translate("Settings", "Tool settings", None))
        self.label_6.setText(_translate("Settings", "Tool location", None))
        self.label_7.setText(_translate("Settings", "Tool configure file", None))
        self.restoreButton.setText(_translate("Settings", "Restore default settings", None))
import resources_rc
| {
"content_hash": "c011f9ed1c7a52982bee604c343bbe2e",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 115,
"avg_line_length": 62.05625,
"alnum_prop": 0.6933225903917817,
"repo_name": "xcgspring/XSTAF",
"id": "0d4a42e863c8668b538bb6676cc303bc236b998e",
"size": "10165",
"binary": false,
"copies": "1",
"ref": "refs/heads/ver0.1",
"path": "XSTAF/ui/ui_settingsDialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7227"
},
{
"name": "Python",
"bytes": "980326"
}
],
"symlink_target": ""
} |
import json
import pytest
import responses
from koordinates import Set, Client, Group, Publish
from .response_data.responses_3 import (
sets_single_good_simulated_response,
sets_new_draft_good_simulated_response,
sets_single_draft_good_simulated_response,
sets_multi_version_good_simulated_response,
sets_single_version_good_simulated_response,
sets_publish_version_good_simulated_response,
)
from .response_data.responses_4 import sets_multiple_good_simulated_response
@pytest.fixture
def client():
    """Koordinates client pointed at a dummy host with a dummy token."""
    test_client = Client("test.koordinates.com", token="test")
    return test_client
@responses.activate
def test_get_set_by_id(client):
    """Fetching a single set by id deserialises the expected fields."""
    responses.add(
        responses.GET,
        client.get_url("SET", "GET", "single", {"id": 1474}),
        body=sets_single_good_simulated_response,
        status=200,
        content_type="application/json",
    )

    result = client.sets.get(1474)

    assert isinstance(result, Set)
    assert result.title == "Ultra Fast Broadband Initiative Coverage"
    assert result.group.name == "New Zealand Broadband Map"
    expected_html_url = (
        "https://test.koordinates.com/set/933-ultra-fast-broadband-initiative-coverage/"
    )
    assert result.url_html == expected_html_url
@responses.activate
def test_get_set_set_returns_all_rows(client):
    """Listing sets iterates over every record in the mocked response."""
    responses.add(
        responses.GET,
        client.get_url("SET", "GET", "multi"),
        body=sets_multiple_good_simulated_response,
        status=200,
        content_type="application/json",
    )

    returned_sets = sum(1 for _ in client.sets.list())
    assert returned_sets == 2
@responses.activate
def test_set_list_drafts(client):
    """Create a set, then check that it shows up in the drafts listing."""
    # NOTE: with `responses`, registration order matters when URLs overlap —
    # keep these three registrations in call order (create, fetch, list drafts).
    responses.add(
        responses.POST,
        client.get_url("SET", "POST", "create"),
        body=sets_new_draft_good_simulated_response,
        status=201,
        adding_headers={
            "Location": "https://test.koordinates.com/services/api/v1/sets/1/"
        },
    )
    responses.add(
        responses.GET,
        client.get_url("SET", "GET", "single", {"id": 1}),
        body=sets_new_draft_good_simulated_response,
        status=200,
    )
    responses.add(
        responses.GET,
        client.get_url("SET", "GET", "multidraft"),
        body=sets_single_draft_good_simulated_response,
        status=200,
    )
    s = Set()
    s.title = "New Set"
    rs = client.sets.create(s)
    # Exactly one draft should come back from the listing.
    sets_amount = 0
    for _set in client.sets.list_drafts():
        sets_amount += 1
    assert sets_amount == 1
    # create() mutates and returns the same object it was given.
    assert rs is s
    assert rs.publish_to_catalog_services == False
    assert isinstance(s.group, Group)
    assert len(responses.calls) == 3
@responses.activate
def test_set_create(client):
    """Creating a set POSTs the expected payload and hydrates the object."""
    responses.add(
        responses.POST,
        client.get_url("SET", "POST", "create"),
        body=sets_single_good_simulated_response,
        status=201,
        adding_headers={
            "Location": "https://test.koordinates.com/services/api/v1/sets/933/"
        },
    )
    responses.add(
        responses.GET,
        client.get_url("SET", "GET", "single", {"id": 933}),
        body=sets_single_good_simulated_response,
        status=200,
    )
    s = Set()
    s.title = "test title"
    s.description = "description"
    s.group = 141
    s.items = [
        "https://test.koordinates.com/services/api/v1/layers/4226/",
        "https://test.koordinates.com/services/api/v1/layers/4228/",
        "https://test.koordinates.com/services/api/v1/layers/4227/",
        "https://test.koordinates.com/services/api/v1/layers/4061/",
        "https://test.koordinates.com/services/api/v1/layers/4147/",
        "https://test.koordinates.com/services/api/v1/layers/4148/",
    ]
    rs = client.sets.create(s)
    # create() mutates and returns the same object; the plain-int group id
    # is deserialised into a Group instance.
    assert rs is s
    assert isinstance(s.group, Group)
    assert s.group.id == 141
    assert len(responses.calls) == 2
    # Inspect the POSTed JSON body to confirm the serialised payload.
    req = json.loads(responses.calls[0].request.body.decode("utf-8"))
    assert len(req["items"]) == 6
    assert req["group"] == 141
@responses.activate
def test_set_list_versions(client):
    """Listing a set's versions yields one item per version record."""
    responses.add(
        responses.GET,
        client.get_url("SET_VERSION", "GET", "multi", {"id": 1}),
        body=sets_multi_version_good_simulated_response,
        status=200,
    )

    version_count = sum(1 for _ in client.sets.list_versions(1))
    assert version_count == 2
@responses.activate
def test_set_get_version(client):
    """Fetching a specific version returns that version's metadata."""
    version_url = client.get_url("SET_VERSION", "GET", "single", {"id": 1, "version_id": 1})
    responses.add(
        responses.GET,
        version_url,
        body=sets_new_draft_good_simulated_response,
        status=200,
    )

    fetched = client.sets.get_version(1, 1)
    assert fetched.version.id == 1
@responses.activate
def test_set_get_draft(client):
    """get_draft should redirect to the draft version of the set."""
    draft_url = client.get_url("SET_VERSION", "GET", "draft", {"id": 1})
    responses.add(
        responses.GET,
        draft_url,
        body=sets_new_draft_good_simulated_response,
        status=201,
        adding_headers={
            "Location": "https://test.koordinates.com/services/api/v1/sets/1/"
        },
    )

    draft = client.sets.get_draft(1)
    assert draft.version.id == 1
@responses.activate
def test_set_get_published(client):
    """get_published should redirect to the published version of the set."""
    published_url = client.get_url("SET_VERSION", "GET", "published", {"id": 1})
    responses.add(
        responses.GET,
        published_url,
        body=sets_new_draft_good_simulated_response,
        status=201,
        adding_headers={
            "Location": "https://test.koordinates.com/services/api/v1/sets/1/"
        },
    )

    published = client.sets.get_published(1)
    assert published.version.id == 1
@responses.activate
def test_set_get_create_draft(client):
    """create_draft POSTs once and returns the new draft version."""
    responses.add(
        responses.POST,
        client.get_url("SET_VERSION", "POST", "create", {"id": 1}),
        body=sets_new_draft_good_simulated_response,
        status=200,
    )

    draft = client.sets.create_draft(1)

    assert draft.version.id == 1
    assert len(responses.calls) == 1
@responses.activate
def test_publish_single_set_version(client):
    """Publishing a fetched set version follows the Location redirect."""
    # NOTE: with `responses`, registration order matters when URLs overlap —
    # keep these registrations in call order (get version, publish, fetch publish).
    responses.add(
        responses.GET,
        client.get_url("SET_VERSION", "GET", "single", {"id": 5, "version_id": 10}),
        body=sets_single_version_good_simulated_response,
        status=200,
        content_type="application/json",
    )
    lv = client.sets.get_version(5, 10)
    assert lv.id == 5
    assert lv.version.id == 10
    # Publish endpoint answers 201 with a Location header pointing at the
    # created publish record, which the client then fetches.
    responses.add(
        responses.POST,
        client.get_url("SET_VERSION", "POST", "publish", {"id": 5, "version_id": 10}),
        body="",
        status=201,
        adding_headers={
            "Location": "https://test.koordinates.com/services/api/v1/publish/10/"
        },
        content_type="application/json",
    )
    responses.add(
        responses.GET,
        "https://test.koordinates.com/services/api/v1/publish/10/",
        body=sets_publish_version_good_simulated_response,
        status=200,
        content_type="application/json",
    )
    p = lv.publish()
    assert isinstance(p, Publish)
    assert p.id == 10
| {
"content_hash": "e03954aebf7cea145ce8ba2967102392",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 91,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.6159844054580896,
"repo_name": "koordinates/python-client",
"id": "915a839d52f1c40fe21bbdf3005d39c373ca492e",
"size": "7182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1400"
},
{
"name": "Python",
"bytes": "332997"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``EOI.selected_source``: optional CharField with CSO/UNI choices."""
    # NOTE(review): filename pattern suggests this was auto-generated by
    # `makemigrations`; applied migrations should not be hand-edited.

    dependencies = [
        ('project', '0009_auto_20170905_0727'),
    ]

    operations = [
        migrations.AlterField(
            model_name='eoi',
            name='selected_source',
            field=models.CharField(blank=True, choices=[('CSO', 'CSO-Initiated'), ('UNI', 'UN-Initiated')], max_length=3, null=True),
        ),
    ]
| {
"content_hash": "2e878deb42445afea87400900c64ca8d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 133,
"avg_line_length": 25.944444444444443,
"alnum_prop": 0.5995717344753747,
"repo_name": "unicef/un-partner-portal",
"id": "596767b122d303c068d16051fee166d327a16807",
"size": "540",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "backend/unpp_api/apps/project/migrations/0010_auto_20170905_0746.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "468629"
},
{
"name": "Dockerfile",
"bytes": "2303"
},
{
"name": "HTML",
"bytes": "49027"
},
{
"name": "JavaScript",
"bytes": "2199879"
},
{
"name": "Python",
"bytes": "1322681"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "Smarty",
"bytes": "751"
}
],
"symlink_target": ""
} |
import os
from gensim import corpora, models, similarities
from pprint import pprint # pretty-printer
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
if (os.path.exists("/tmp/deerwester.dict")):
dictionary = corpora.Dictionary.load('/tmp/deerwester.dict')
corpus = corpora.MmCorpus('/tmp/deerwester.mm')
print("Used files generated from first tutorial")
else:
print("Please run first tutorial to generate data set")
pprint(corpus.__dict__)
tfidf = models.TfidfModel(corpus) # step 1 -- initialize a model
pprint(tfidf.__dict__)
doc_bow = [(0, 1), (1, 1)]
print(tfidf[doc_bow]) # step 2 -- use the model to transform vectors
corpus_tfidf = tfidf[corpus]
for doc in corpus_tfidf:
print doc
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=2) # initialize an LSI transformation
pprint(lsi.__dict__)
pprint(lsi.id2word.__dict__)
corpus_lsi = lsi[corpus_tfidf] # create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi
pprint(corpus_lsi.__dict__)
lsi.print_topics(2)
for doc in corpus_lsi: # both bow->tfidf and tfidf->lsi transformations are actually executed here, on the fly
print doc
lsi.save('/tmp/model.lsi') # same for tfidf, lda, ...
lsi = models.LsiModel.load('/tmp/model.lsi')
model = models.TfidfModel(corpus, normalize=True)
print model.__dict__
model = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=300)
print model.__dict__
model = models.RpModel(corpus_tfidf, num_topics=500)
print model.__dict__
model = models.LdaModel(corpus, id2word=dictionary, num_topics=100)
print model.__dict__
model = models.HdpModel(corpus, id2word=dictionary)
print model.__dict__
| {
"content_hash": "e9a711b1c077b8d75f46879bc5b5f95d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 110,
"avg_line_length": 30.410714285714285,
"alnum_prop": 0.7345860246623606,
"repo_name": "yuyunliuhen/automatic-text-categorization",
"id": "62adccfaeb7cb08073cc5d5dae9eb4bf966c01e4",
"size": "1703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/gensim_tutorial/topics_and_transformations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37598"
},
{
"name": "Shell",
"bytes": "428"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from . import models
@admin.register(models.RemoteCalendar)
class RemoteCalendarAdmin(admin.ModelAdmin):
    """Basic remote calendar admin"""
    # Only expose name/url for editing in the admin form.
    fields = ["name", "url"]
class SubjectExclusionInline(admin.StackedInline):
    """Inline editor for SubjectExclusion rows attached to a CalendarView."""
    model = models.SubjectExclusion
    extra = 0  # don't render blank extra forms by default
@admin.register(models.CalendarView)
class CalendarViewAdmin(admin.ModelAdmin):
    """Admin for calendar views, with inline subject exclusions and a
    read-only display of the view's URL."""

    inlines = [SubjectExclusionInline]
    fields = ["name", "calendar", "calendar_url"]
    readonly_fields = ["calendar_url"]

    def calendar_url(self, o: models.CalendarView) -> str:
        """Absolute URL for this calendar view (shown read-only)."""
        url = o.get_absolute_url()
        return url
| {
"content_hash": "83714628f1d5f52de0fdcfd15e6ac5b2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 58,
"avg_line_length": 24.296296296296298,
"alnum_prop": 0.7103658536585366,
"repo_name": "Hovercross/peacockhosting-apps",
"id": "3704ef2cf04a025f494d78fea14643cdf92f41ff",
"size": "656",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "calendar_filter/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "643"
},
{
"name": "HTML",
"bytes": "566"
},
{
"name": "Python",
"bytes": "59781"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.signal as ss
import astropy.io.fits as fits
import matplotlib.pyplot as plt
inpt = str(raw_input("Nome do Arquivo: "))
lc = fits.open(inpt)
bin = float(raw_input("bin size (or camera resolution): "))
# Convert to big-endian array is necessary to the lombscargle function
rate = np.array(lc[1].data["RATE"], dtype='float64')
time = np.array(lc[1].data["TIME"], dtype='float64')
time -= time.min()
# Exclue NaN values -------------------------
print ''
print 'Excluding nan and negative values...'
print ''
exclude = []
for i in xrange(len(rate)):
if rate[i] > 0:
pass
else:
exclude.append(i)
exclude = np.array(exclude)
nrate = np.delete(rate, exclude)
ntime = np.delete(time, exclude)
# --------------------------------------------
# normalize count rate
nrate -= nrate.mean()
# maximum frequecy limited by resolution
freqmax = 1.0/bin
# Ther periodogram itself
f, p = ss.periodogram(nrate, fs=freqmax)#, nfft=1500)
print 'TIME =', max(time)
# Plot lightcurve on top panel
#plt.subplot(2, 1, 1)
#plt.plot(ntime, nrate, 'bo-')
#plt.xlabel('Time [s]', fontsize=12)
#plt.ylabel('Normalized Count Rate [counts/s]', fontsize=12)
# Plot powerspectrum on bottom panel
#plt.subplot(2, 1, 2)
#plt.plot(f, p, 'b.-', label='f = {0:.3e}'.format(f[np.argmax(p)]))
#plt.xlabel('Frequency [Hz]', fontsize=12)
#plt.ylabel('Power', fontsize=12)
#plt.legend(loc='best')
# show plot
#plt.show()
#plt.plot(f, p)
plt.plot(f, p, linestyle='steps', label='T$_{{peak}}$ = {0:.3f} s'.format(1.0/f[np.argmax(p)]))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power')
plt.xlim(min(f), max(f))
plt.legend(loc='best', frameon=False)
plt.savefig("periodogram.pdf", orientation='landscape', papertype='a4',
format='pdf', bbox_inches='tight')
plt.show()
| {
"content_hash": "93f398dc0b40a66fe7fd9679fd1bf915",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 95,
"avg_line_length": 26.791044776119403,
"alnum_prop": 0.6456824512534819,
"repo_name": "evandromr/python_scitools",
"id": "d9ebc88e64a2c1b00e963744f72a3417bc7fb530",
"size": "1814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plotperiodogram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38677"
}
],
"symlink_target": ""
} |
from sys import argv
import dbus
def kb_light_set(delta):
    """Adjust the keyboard backlight by *delta* steps via UPower's D-Bus API.

    The new level is clamped at 0 and only applied when it does not exceed
    the hardware maximum; out-of-range requests leave the level unchanged.
    Returns the resulting backlight level as a percentage of the maximum.
    """
    bus = dbus.SystemBus()
    kbd_backlight_proxy = bus.get_object('org.freedesktop.UPower', '/org/freedesktop/UPower/KbdBacklight')
    kbd_backlight = dbus.Interface(kbd_backlight_proxy, 'org.freedesktop.UPower.KbdBacklight')

    current = kbd_backlight.GetBrightness()
    maximum = kbd_backlight.GetMaxBrightness()

    new = max(0, current + delta)
    # Fix: the previous `new >= 0 and new <= maximum` check was partly
    # redundant — after max(0, ...) the value is always >= 0.
    if new <= maximum:
        current = new
        kbd_backlight.SetBrightness(current)

    # Return current backlight level percentage.
    # NOTE(review): under Python 2 this is integer division (whole percent);
    # under Python 3 it yields a float — confirm which is intended.
    return 100 * current / maximum
if __name__ == '__main__':
    # Expect exactly one argument: (+|--up) to increment, (-|--down) to decrement.
    args = argv[1:]
    if len(args) != 1:
        print("Script takes exactly one argument.", len(args), "arguments provided.")
    elif args[0] in ("--up", "+"):
        print(kb_light_set(1))
    elif args[0] in ("--down", "-"):
        print(kb_light_set(-1))
    else:
        print("Unknown argument:", args[0])
"content_hash": "c7ac568ae265370f91ff8636869cf5a7",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 106,
"avg_line_length": 34.125,
"alnum_prop": 0.5952380952380952,
"repo_name": "pharpend/dotfiles",
"id": "ade9bdf20ad39f9498b1bfc51fe78bc531cc00e2",
"size": "1132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/kb-light.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3601118"
},
{
"name": "Common Lisp",
"bytes": "1252443"
},
{
"name": "Emacs Lisp",
"bytes": "14142461"
},
{
"name": "Gherkin",
"bytes": "13842"
},
{
"name": "HTML",
"bytes": "1947"
},
{
"name": "Haskell",
"bytes": "10108"
},
{
"name": "M4",
"bytes": "515"
},
{
"name": "Makefile",
"bytes": "10444"
},
{
"name": "Perl",
"bytes": "4466"
},
{
"name": "Python",
"bytes": "78701"
},
{
"name": "Racket",
"bytes": "91676"
},
{
"name": "Roff",
"bytes": "734939"
},
{
"name": "Ruby",
"bytes": "51926"
},
{
"name": "Scheme",
"bytes": "134939"
},
{
"name": "Shell",
"bytes": "263301"
},
{
"name": "Standard ML",
"bytes": "9321"
},
{
"name": "TeX",
"bytes": "129892"
},
{
"name": "Vim script",
"bytes": "1938"
}
],
"symlink_target": ""
} |
import os
import sys
import django
# Django settings for the example/testing project.

# Make the example app importable when running from the project directory.
APP = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJ_ROOT = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, APP)

DEBUG = True

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# Local SQLite database for tests/dev.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'dev.db',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.flatpages',
    'categories',
    'categories.editor',
    'mptt',
    'simpletext',
)

TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True

MEDIA_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'uploads'))
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'static'))
STATIC_URL = '/static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Test-only secret key; never reuse in production.
SECRET_KEY = 'bwq#m)-zsey-fs)0#4*o=2z(v5g!ei=zytl9t-1hesh4b&-u^d'

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
)

ROOT_URLCONF = 'urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates'))],
        'OPTIONS': {
            'debug': DEBUG,
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        }
    },
]

# Bug fix: `django.VERSION[1] > 5` compared only the minor version number, so
# it evaluated False for Django >= 2.0 (e.g. (2, 0, ...)). Compare the whole
# version tuple instead; DiscoverRunner exists from Django 1.6 onward.
if django.VERSION >= (1, 6):
    TEST_RUNNER = 'django.test.runner.DiscoverRunner'
"content_hash": "32e18af22334701a183f4d9c8e292d11",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 88,
"avg_line_length": 26,
"alnum_prop": 0.6336441336441336,
"repo_name": "miceno/django-categories",
"id": "758ed24f6a5cb17a59309fabc2fa5b9ea7746eb0",
"size": "2612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/settings-testing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3091"
},
{
"name": "CSS",
"bytes": "17443"
},
{
"name": "HTML",
"bytes": "18445"
},
{
"name": "JavaScript",
"bytes": "28087"
},
{
"name": "Makefile",
"bytes": "3637"
},
{
"name": "Python",
"bytes": "157634"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import copy
from .utils import merge
from .compat import basestring
from .exceptions import (err_exit, DXError, DXCLIError)
'''
System Requirements
+++++++++++++++++++
A module containing utility methods useful for packing and unpacking
system requirements.
'''
class SystemRequirementsDict(object):
    """
    A class representing system requirements that can be passed as
    "systemRequirements" to the class-xxxx/run API call (after converting
    it to a dictionary with as_dict()).
    """

    def __init__(self, entrypoints):
        """
        :param entrypoints: mapping of entry point name to its requirements,
            or None.
        :raises DXError: if entrypoints is neither a dict nor None.

        Example of the entrypoints input:
            {"main":
                {"instanceType": "mem2_hdd2_x2"},
             "other_function":
                {"instanceType": "mem2_hdd2_x1",
                 "clusterSpec": {"type": "spark",
                                 "version": "2.4.0",
                                 "initialInstanceCount": 2}}}
        """
        if entrypoints is not None and not isinstance(entrypoints, dict):
            raise DXError("Expected entrypoints to be a dict or None")
        # Deep-copy so the in-place merges done later never mutate the
        # caller's dictionary.
        self.entrypoints = copy.deepcopy(entrypoints)

    @classmethod
    def from_instance_count(cls, instance_count_arg, entrypoint="*"):
        """
        Returns a SystemRequirementsDict that can be passed as a
        "systemRequirements" input to job/new or run/ API calls.

        :param instance_count_arg: either a
            * string or int, eg. "6" or 8 (applied to `entrypoint`), or a
            * dictionary, eg. {"main": 4, "other_function": 2}
        :param entrypoint: entry point to use for a scalar count (default "*").
        :raises DXError: if the argument is not an int, string or dict, or
            cannot be converted to an int.
        """
        try:
            if instance_count_arg is None:
                return cls(None)
            if isinstance(instance_count_arg, basestring) or isinstance(instance_count_arg, int):
                return cls({entrypoint: {"clusterSpec": {"initialInstanceCount": int(instance_count_arg)}}})
            if isinstance(instance_count_arg, dict):
                return cls({k: {"clusterSpec": {"initialInstanceCount": int(v)}} for k, v in instance_count_arg.items()})
            raise ValueError
        except ValueError:
            # BUG FIX: the exception was previously constructed but never
            # raised, so invalid input silently returned None.
            raise DXError('Expected instance_count field to be either an int, string or a dict')

    @classmethod
    def from_instance_type(cls, instance_type_arg, entrypoint="*"):
        """
        Returns SystemRequirementsDict that can be passed as a
        "systemRequirements" input to job/new or run/ API calls.

        :param instance_type_arg: either a
            * string, eg. mem1_ssd1_x2, or a
            * dictionary, eg. {"main": "mem2_hdd2_x2", "other_function": "mem2_hdd2_x1"}
        :param entrypoint: entry point to use for a scalar type (default "*").
        :raises DXError: if the argument is neither a string nor a dict.
        """
        if instance_type_arg is None:
            return cls(None)
        if isinstance(instance_type_arg, basestring):
            # By default, all entry points ("*") should use this instance type
            return cls({entrypoint: {"instanceType": instance_type_arg}})
        if isinstance(instance_type_arg, dict):
            # instance_type is a map of entry point to instance type
            return cls({fn: {"instanceType": fn_inst} for fn, fn_inst in instance_type_arg.items()})
        raise DXError('Expected instance_type field to be either a string or a dict')

    @classmethod
    def from_sys_requirements(cls, system_requirements, _type='all'):
        """
        Returns SystemRequirementsDict encapsulating system requirements.

        It can extract only entrypoints with specific fields ('clusterSpec',
        'instanceType', etc), depending on the value of _type.

        :param system_requirements: full "systemRequirements" mapping.
        :param _type: 'all', 'clusterSpec' or 'instanceType'.
        :raises DXError: for any other _type value.
        """
        if _type not in ('all', 'clusterSpec', 'instanceType'):
            raise DXError("Expected '_type' to be either 'all', 'clusterSpec', or 'instanceType'")
        if _type == 'all':
            return cls(system_requirements)

        extracted = defaultdict(dict)
        for entrypoint, req in system_requirements.items():
            if _type in req:
                extracted[entrypoint][_type] = req[_type]
        return cls(dict(extracted))

    def override_cluster_spec(self, srd):
        """
        Returns SystemRequirementsDict can be passed in a "systemRequirements"
        input to app-xxx/run, e.g. {'fn': {'clusterSpec': {initialInstanceCount: 3, version: "2.4.0", ..}}}

        Since full clusterSpec must be passed to the API server, we need to retrieve the cluster
        spec defined in app doc's systemRequirements and overwrite the field initialInstanceCount
        with the value the user passed to dx run for each entrypoint.
        initialInstanceCount is currently the only clusterSpec's field the user is allowed to change
        at runtime.

        A few scenarios when requesting instance count for different entrypoints with dx run
        and the resulting merged systemRequirements (merged_cluster_spec). The bootstapScript
        field here is only one of many (version, ports, etc) that should be copied from app
        spec to merged_cluster_spec:

        Requested: {"*": 5}
        App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"},
                  "other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"}}
        Merged: {"main": "clusterSpec": {"initialInstanceCount": 5, bootstrapScript: "x.sh"},
                 "other": "clusterSpec": {"initialInstanceCount": 5, bootstrapScript: "y.sh"}}

        Requested: {"*": 15}
        App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"},
                  "other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"},
                  "*": "clusterSpec": {"initialInstanceCount": 11, bootstrapScript: "y.sh"}}
        Merged: {"main": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "x.sh"},
                 "other": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "y.sh"},
                 "*": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "y.sh"}}

        Requested: {"main": 12}
        App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"},
                  "other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"}}
        Merged: {"main": "clusterSpec": {"initialInstanceCount": 12, bootstrapScript: "x.sh"}}

        Requested: {"main": 33}
        App doc: {"*": "clusterSpec": {"initialInstanceCount": 2, bootstrapScript: "z.sh"}}
        Merged: {"main": "clusterSpec": {"initialInstanceCount": 33, bootstrapScript: "z.sh"}}

        Requested: {"main": 22, "*": 11}
        App doc: {"*": "clusterSpec": {"initialInstanceCount": 2, bootstrapScript: "t.sh"}}
        Merged: {"main": "clusterSpec": {"initialInstanceCount": 22, bootstrapScript: "t.sh"},
                 "*": "clusterSpec": {"initialInstanceCount": 11, bootstrapScript: "t.sh"}}
        """
        merged_cluster_spec = copy.deepcopy(self.entrypoints)

        # Remove entrypoints without "clusterSpec"
        merged_cluster_spec = dict([(k, v) for k, v in merged_cluster_spec.items() if v.get("clusterSpec") is not None])

        # Remove entrypoints not provided in requested instance counts
        merged_cluster_spec = dict([(k, v) for k, v in merged_cluster_spec.items() if \
                                                k in srd.entrypoints or "*" in srd.entrypoints])

        # Overwrite values of self.entrypoints.clusterSpec with the ones from srd
        # Named entrypoint takes precedence over the wildcard
        for entry_pt, req in merged_cluster_spec.items():
            merged_cluster_spec[entry_pt]["clusterSpec"].update(
                srd.entrypoints.get(entry_pt, srd.entrypoints.get("*"))["clusterSpec"])

        # Check if all entrypoints in srd are included in merged_cluster_spec
        # (if a named entrypoint was used in srd and such an entrypoint doesn't exist
        # in app sys req, we need to take the cluster spec from the app's "*", if it exists)
        for entry_pt, req in srd.entrypoints.items():
            if entry_pt not in merged_cluster_spec and "*" in self.entrypoints and "clusterSpec" in self.entrypoints["*"]:
                merged_cluster_spec[entry_pt] = {"clusterSpec": copy.deepcopy(self.entrypoints["*"]["clusterSpec"])}
                merged_cluster_spec[entry_pt]["clusterSpec"].update(req["clusterSpec"])

        return SystemRequirementsDict(merged_cluster_spec)

    def _add_dict_values(self, d1, d2):
        """
        Merges the values of two dictionaries, which are expected to be dictionaries, e.g
        d1 = {'a': {'x': pqr}}
        d2 = {'a': {'y': lmn}, 'b': {'y': rst}}
        will return: {'a': {'x': pqr, 'y': lmn}, 'b': {'y': rst}}.
        Collisions of the keys of the sub-dictionaries are not checked;
        values from d2 win.
        """
        if d1 is None and d2 is None:
            return None
        d1 = d1 or {}
        d2 = d2 or {}
        added = {}
        for key in set(list(d1.keys()) + list(d2.keys())):
            added[key] = dict(d1.get(key, {}), **(d2.get(key, {})))
        return added

    def __add__(self, other):
        """Merge two SystemRequirementsDicts entry point by entry point."""
        if not isinstance(other, SystemRequirementsDict):
            raise DXError("Developer error: SystemRequirementsDict expected")
        added_entrypoints = self._add_dict_values(self.entrypoints, other.entrypoints)
        return SystemRequirementsDict(added_entrypoints)

    def as_dict(self):
        """Return the underlying entrypoints mapping (or None)."""
        return self.entrypoints
| {
"content_hash": "1079463a57da9e1ce2c435a768dc8a5c",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 122,
"avg_line_length": 48.49214659685864,
"alnum_prop": 0.6172532930252645,
"repo_name": "dnanexus/dx-toolkit",
"id": "321169f5e3679e937c65056e3a449638625c8522",
"size": "9263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/dxpy/system_requirements.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3198"
},
{
"name": "C",
"bytes": "9503"
},
{
"name": "C++",
"bytes": "1906095"
},
{
"name": "CMake",
"bytes": "25521"
},
{
"name": "Java",
"bytes": "2569488"
},
{
"name": "Makefile",
"bytes": "42074"
},
{
"name": "NSIS",
"bytes": "17861"
},
{
"name": "Nextflow",
"bytes": "955"
},
{
"name": "Perl",
"bytes": "55622"
},
{
"name": "PowerShell",
"bytes": "1442"
},
{
"name": "Python",
"bytes": "2606345"
},
{
"name": "R",
"bytes": "543112"
},
{
"name": "Ruby",
"bytes": "95466"
},
{
"name": "Shell",
"bytes": "79900"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_enclosure_info
short_description: Retrieve information about one or more Enclosures
description:
- Retrieve information about one or more of the Enclosures from OneView.
- This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(oneview_enclosure_info) module no longer returns C(ansible_facts)!
version_added: "2.5"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Enclosure name.
options:
description:
- "List with options to gather additional information about an Enclosure and related resources.
Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
you can provide specific parameters."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather information about all Enclosures
oneview_enclosure_info:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
no_log: true
delegate_to: localhost
register: result
- debug:
msg: "{{ result.enclosures }}"
- name: Gather paginated, filtered and sorted information about Enclosures
oneview_enclosure_info:
params:
start: 0
count: 3
sort: name:descending
filter: status=OK
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
no_log: true
delegate_to: localhost
register: result
- debug:
msg: "{{ result.enclosures }}"
- name: Gather information about an Enclosure by name
oneview_enclosure_info:
name: Enclosure-Name
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
no_log: true
delegate_to: localhost
register: result
- debug:
msg: "{{ result.enclosures }}"
- name: Gather information about an Enclosure by name with options
oneview_enclosure_info:
name: Test-Enclosure
options:
- script # optional
- environmentalConfiguration # optional
- utilization # optional
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
no_log: true
delegate_to: localhost
register: result
- debug:
msg: "{{ result.enclosures }}"
- debug:
msg: "{{ result.enclosure_script }}"
- debug:
msg: "{{ result.enclosure_environmental_configuration }}"
- debug:
msg: "{{ result.enclosure_utilization }}"
- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
specified dates"
oneview_enclosure_info:
name: Test-Enclosure
options:
- utilization: # optional
fields: AmbientTemperature
filter:
- startDate=2016-07-01T14:29:42.000Z
- endDate=2017-07-01T03:29:42.000Z
view: day
refresh: false
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
no_log: true
delegate_to: localhost
register: result
- debug:
msg: "{{ result.enclosures }}"
- debug:
msg: "{{ result.enclosure_utilization }}"
'''
RETURN = '''
enclosures:
description: Has all the OneView information about the Enclosures.
returned: Always, but can be null.
type: dict
enclosure_script:
description: Has all the OneView information about the script of an Enclosure.
returned: When requested, but can be null.
type: str
enclosure_environmental_configuration:
description: Has all the OneView information about the environmental configuration of an Enclosure.
returned: When requested, but can be null.
type: dict
enclosure_utilization:
description: Has all the OneView information about the utilization of an Enclosure.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class EnclosureInfoModule(OneViewModuleBase):
    """Ansible module gathering information about HPE OneView Enclosures."""

    # Module arguments accepted on top of the common OneView arguments.
    argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))

    def __init__(self):
        super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
        # Backwards compatibility: detect invocation under the deprecated
        # *_facts name and warn the user.
        self.is_old_facts = self.module._name == 'oneview_enclosure_facts'
        if self.is_old_facts:
            self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
                                  "and the renamed one no longer returns ansible_facts", version='2.13')

    def execute_module(self):
        """Collect enclosure info (optionally a single named enclosure plus
        extra options) and return the Ansible result dict."""
        info = {}
        if self.module.params['name']:
            enclosures = self._get_by_name(self.module.params['name'])
            if self.options and enclosures:
                info = self._gather_optional_info(self.options, enclosures[0])
        else:
            enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
        info['enclosures'] = enclosures
        if self.is_old_facts:
            # Deprecated behavior: wrap everything in ansible_facts.
            return dict(changed=False,
                        ansible_facts=info)
        return dict(changed=False, **info)

    def _gather_optional_info(self, options, enclosure):
        """Fetch the additional data requested via the 'options' argument
        (script, environmentalConfiguration, utilization) for one enclosure."""
        enclosure_client = self.oneview_client.enclosures
        info = {}
        if options.get('script'):
            info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
        if options.get('environmentalConfiguration'):
            env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
            info['enclosure_environmental_configuration'] = env_config
        if options.get('utilization'):
            info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
        return info

    def _get_utilization(self, enclosure, params):
        """Fetch utilization data for *enclosure*; *params* may be a dict of
        query options (fields, view, refresh, filter) or any other value."""
        # Default every option to '' when params is not a dict.
        # IMPROVEMENT: local renamed to filter_ to avoid shadowing the
        # built-in filter(); the SDK keyword argument name is unchanged.
        fields = view = refresh = filter_ = ''
        if isinstance(params, dict):
            fields = params.get('fields')
            view = params.get('view')
            refresh = params.get('refresh')
            filter_ = params.get('filter')
        return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
                                                              fields=fields,
                                                              filter=filter_,
                                                              refresh=refresh,
                                                              view=view)

    def _get_by_name(self, name):
        """Return the list of enclosures whose 'name' equals *name*."""
        return self.oneview_client.enclosures.get_by('name', name)
def main():
    """Module entry point: build the module object and run it."""
    module = EnclosureInfoModule()
    module.run()
# Standard script entry guard so importing this module has no side effects.
if __name__ == '__main__':
    main()
| {
"content_hash": "5048ba7ef791e0139e787b35f7bb26f5",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 119,
"avg_line_length": 31.869955156950674,
"alnum_prop": 0.6309272548191923,
"repo_name": "thaim/ansible",
"id": "1649d996958fd05a6b6081fa8dbf3257855ec420",
"size": "7290",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/remote_management/oneview/oneview_enclosure_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
class Role:
    """
    A class object which holds role information.

    Concrete roles are expected to define the attributes ``role_id``,
    ``role_name``, ``role_allegiance`` and ``role_verdict``.
    """

    def __str__(self):
        """Render the role as ``[id] name {allegiance, verdict}``."""
        return "[{id}] {name} {{{allegiance}, {verdict}}}".format(
            id=self.role_id,
            name=self.role_name,
            allegiance=self.role_allegiance,
            verdict=self.role_verdict,
        )

    def is_innocent(self):
        """Return True when an investigation reads this role as innocent."""
        # IMPROVEMENT: return the comparison directly instead of the
        # if/else-returning-literal-booleans anti-pattern.
        return self.role_verdict == "Innocent"
# Registry of one prototype instance per role; a role's position in this
# list becomes its role_id (assigned below).
role_index = []


class TownieRole(Role):
    role_id = 0
    role_name = "Townie"
    role_allegiance = "Town"
    role_verdict = "Innocent"


role_index.append(TownieRole())


class MafiosoRole(Role):
    role_id = 0
    role_name = "Mafioso"
    role_allegiance = "Mafia"
    role_verdict = "Guilty"


role_index.append(MafiosoRole())


class SheriffRole(Role):
    role_id = 0
    role_name = "Sheriff"
    role_allegiance = "Town"
    role_verdict = "Innocent"


role_index.append(SheriffRole())


class NurseRole(Role):
    role_id = 0
    role_name = "Nurse"
    role_allegiance = "Town"
    role_verdict = "Innocent"


role_index.append(NurseRole())


class GodfatherRole(Role):
    role_id = 0
    role_name = "Godfather"
    role_allegiance = "Mafia"
    role_verdict = "Innocent"


role_index.append(GodfatherRole())

# Give each registered prototype its registry position as its id.
for position, prototype in enumerate(role_index):
    prototype.role_id = position
def town_power_roles():
    """Return fresh instances of every available town power role."""
    return [
        NurseRole(),
        SheriffRole(),
    ]
def mafia_power_roles():
    """Return fresh instances of every available mafia power role."""
    return [
        GodfatherRole(),
    ]
def all_power_roles():
    """Return fresh instances of every available power role (town + mafia)."""
    # BUG FIX: the original called the undefined name ``townie_power_roles``
    # (NameError at runtime); the town helper is named ``town_power_roles``.
    return town_power_roles() + mafia_power_roles()
"content_hash": "8dd0ecc5eb5596297f31f4f321fc9c2e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 66,
"avg_line_length": 21.929411764705883,
"alnum_prop": 0.5895922746781116,
"repo_name": "gleasoda/MAFIA",
"id": "e01edf5c9da825b1e1680f88644b3fb2ba8b57be",
"size": "1864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mafia_game/role.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11020"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.studio.v1.flow.engagement.engagement_context import EngagementContextList
from twilio.rest.studio.v1.flow.engagement.step import StepList
class EngagementList(ListResource):
    """List resource for the Engagements of a single Studio Flow."""

    def __init__(self, version, flow_sid):
        """
        Build the list resource for a Flow's Engagements.

        :param Version version: Version that contains the resource
        :param flow_sid: The SID of the Flow
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementList
        """
        super(EngagementList, self).__init__(version)

        # Path Solution
        self._solution = {'flow_sid': flow_sid, }
        self._uri = '/Flows/{flow_sid}/Engagements'.format(**self._solution)

    def stream(self, limit=None, page_size=None):
        """
        Lazily yield EngagementInstance records from the API.

        Pages are fetched on demand, so memory use stays bounded no matter
        how many records match.

        :param int limit: Upper bound on the number of records yielded;
                          None means no limit
        :param int page_size: Records fetched per request; when omitted an
                              efficient size is chosen automatically
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.studio.v1.flow.engagement.EngagementInstance]
        """
        read_limits = self._version.read_limits(limit, page_size)
        first_page = self.page(page_size=read_limits['page_size'], )
        return self._version.stream(first_page, read_limits['limit'])

    def list(self, limit=None, page_size=None):
        """
        Eagerly fetch EngagementInstance records and return them as a list.

        Unlike stream(), all matching records (up to `limit`) are loaded
        into memory before returning.

        :param int limit: Upper bound on the number of records returned;
                          None means no limit
        :param int page_size: Records fetched per request; when omitted an
                              efficient size is chosen automatically
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.studio.v1.flow.engagement.EngagementInstance]
        """
        return list(self.stream(limit=limit, page_size=page_size, ))

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Request a single page of EngagementInstance records immediately.

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50
        :returns: Page of EngagementInstance
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementPage
        """
        query = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })

        response = self._version.page(method='GET', uri=self._uri, params=query, )

        return EngagementPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Fetch a specific page of EngagementInstance records by its full URL.

        :param str target_url: API-generated URL for the requested results page
        :returns: Page of EngagementInstance
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementPage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return EngagementPage(self._version, response, self._solution)

    def create(self, to, from_, parameters=values.unset):
        """
        Create a new Engagement (start a run of this Studio Flow).

        :param unicode to: The Contact phone number to start a Studio Flow Engagement
        :param unicode from_: The Twilio phone number to send messages or initiate calls from during the Flow Engagement
        :param dict parameters: A JSON string we will add to your flow's context and that you can access as variables inside your flow
        :returns: The created EngagementInstance
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
        """
        form_data = values.of({'To': to, 'From': from_, 'Parameters': serialize.object(parameters), })

        payload = self._version.create(method='POST', uri=self._uri, data=form_data, )

        return EngagementInstance(self._version, payload, flow_sid=self._solution['flow_sid'], )

    def get(self, sid):
        """
        Build a context addressing a single Engagement.

        :param sid: The SID of the Engagement resource to fetch
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
        """
        return EngagementContext(self._version, flow_sid=self._solution['flow_sid'], sid=sid, )

    def __call__(self, sid):
        """
        Shorthand for get(): build a context addressing a single Engagement.

        :param sid: The SID of the Engagement resource to fetch
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
        """
        return EngagementContext(self._version, flow_sid=self._solution['flow_sid'], sid=sid, )

    def __repr__(self):
        """
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Studio.V1.EngagementList>'
class EngagementPage(Page):
    """One page of Engagement results returned by the API."""

    def __init__(self, version, response, solution):
        """
        Wrap one page of the Engagement list response.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: Path solution carrying the Flow SID
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementPage
        """
        super(EngagementPage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Turn one raw API record into an EngagementInstance.

        :param dict payload: Payload response from the API
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
        """
        return EngagementInstance(self._version, payload, flow_sid=self._solution['flow_sid'], )

    def __repr__(self):
        """
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Studio.V1.EngagementPage>'
class EngagementContext(InstanceContext):
    """Context addressing one specific Engagement of one Flow."""

    def __init__(self, version, flow_sid, sid):
        """
        Build a context for a single Engagement.

        :param Version version: Version that contains the resource
        :param flow_sid: Flow SID
        :param sid: The SID of the Engagement resource to fetch
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
        """
        super(EngagementContext, self).__init__(version)

        # Path Solution
        self._solution = {'flow_sid': flow_sid, 'sid': sid, }
        self._uri = '/Flows/{flow_sid}/Engagements/{sid}'.format(**self._solution)

        # Dependents, built lazily on first property access
        self._steps = None
        self._engagement_context = None

    def fetch(self):
        """
        Retrieve the Engagement this context points at.

        :returns: The fetched EngagementInstance
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )

        return EngagementInstance(
            self._version,
            payload,
            flow_sid=self._solution['flow_sid'],
            sid=self._solution['sid'],
        )

    def delete(self):
        """
        Delete the Engagement this context points at.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete(method='DELETE', uri=self._uri, )

    @property
    def steps(self):
        """
        Step subresources of this Engagement (constructed on first access).

        :rtype: twilio.rest.studio.v1.flow.engagement.step.StepList
        """
        if self._steps is None:
            self._steps = StepList(
                self._version,
                flow_sid=self._solution['flow_sid'],
                engagement_sid=self._solution['sid'],
            )
        return self._steps

    @property
    def engagement_context(self):
        """
        Context subresource of this Engagement (constructed on first access).

        :rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
        """
        if self._engagement_context is None:
            self._engagement_context = EngagementContextList(
                self._version,
                flow_sid=self._solution['flow_sid'],
                engagement_sid=self._solution['sid'],
            )
        return self._engagement_context

    def __repr__(self):
        """
        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Studio.V1.EngagementContext {}>'.format(details)
class EngagementInstance(InstanceResource):
    """One Engagement record, with typed accessors over the API payload."""

    class Status(object):
        # Allowed values of the Engagement 'status' field.
        ACTIVE = "active"
        ENDED = "ended"

    def __init__(self, version, payload, flow_sid, sid=None):
        """
        Wrap one raw Engagement payload.

        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
        """
        super(EngagementInstance, self).__init__(version)

        # Marshaled Properties
        self._properties = {
            'sid': payload.get('sid'),
            'account_sid': payload.get('account_sid'),
            'flow_sid': payload.get('flow_sid'),
            'contact_sid': payload.get('contact_sid'),
            'contact_channel_address': payload.get('contact_channel_address'),
            'context': payload.get('context'),
            'status': payload.get('status'),
            'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
            'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
            'url': payload.get('url'),
            'links': payload.get('links'),
        }

        # Context is built lazily by _proxy
        self._context = None
        self._solution = {'flow_sid': flow_sid, 'sid': sid or self._properties['sid'], }

    @property
    def _proxy(self):
        """
        Lazily build (and cache) the EngagementContext that all instance
        actions are delegated to.

        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
        """
        if self._context is None:
            self._context = EngagementContext(
                self._version,
                flow_sid=self._solution['flow_sid'],
                sid=self._solution['sid'],
            )
        return self._context

    @property
    def sid(self):
        """
        :returns: Unique identifier of this Engagement resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def account_sid(self):
        """
        :returns: SID of the Account that created this resource
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def flow_sid(self):
        """
        :returns: SID of the Flow this Engagement belongs to
        :rtype: unicode
        """
        return self._properties['flow_sid']

    @property
    def contact_sid(self):
        """
        :returns: SID of the Contact
        :rtype: unicode
        """
        return self._properties['contact_sid']

    @property
    def contact_channel_address(self):
        """
        :returns: Phone number, SIP address or Client identifier that triggered this Engagement
        :rtype: unicode
        """
        return self._properties['contact_channel_address']

    @property
    def context(self):
        """
        :returns: Current state of the execution flow
        :rtype: dict
        """
        return self._properties['context']

    @property
    def status(self):
        """
        :returns: Status of the Engagement
        :rtype: EngagementInstance.Status
        """
        return self._properties['status']

    @property
    def date_created(self):
        """
        :returns: ISO 8601 date and time in GMT when the Engagement was created
        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def date_updated(self):
        """
        :returns: ISO 8601 date and time in GMT when the Engagement was last updated
        :rtype: datetime
        """
        return self._properties['date_updated']

    @property
    def url(self):
        """
        :returns: Absolute URL of this resource
        :rtype: unicode
        """
        return self._properties['url']

    @property
    def links(self):
        """
        :returns: URLs of the Engagement's nested resources
        :rtype: unicode
        """
        return self._properties['links']

    def fetch(self):
        """
        Re-fetch this Engagement from the API.

        :returns: The fetched EngagementInstance
        :rtype: twilio.rest.studio.v1.flow.engagement.EngagementInstance
        """
        return self._proxy.fetch()

    def delete(self):
        """
        Delete this Engagement.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()

    @property
    def steps(self):
        """
        Step subresources of this Engagement.

        :rtype: twilio.rest.studio.v1.flow.engagement.step.StepList
        """
        return self._proxy.steps

    @property
    def engagement_context(self):
        """
        Context subresource of this Engagement.

        :rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
        """
        return self._proxy.engagement_context

    def __repr__(self):
        """
        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Studio.V1.EngagementInstance {}>'.format(details)
| {
"content_hash": "5bb074a53f361aec69b4fca7c2ed099a",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 134,
"avg_line_length": 33.88702928870293,
"alnum_prop": 0.6145203111495247,
"repo_name": "twilio/twilio-python",
"id": "2049ec28d57d5288e1f29ee9ca4bc780f579ccc5",
"size": "16213",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/studio/v1/flow/engagement/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
"""
Imports all submodules
"""
from __future__ import division
from __future__ import unicode_literals
__version__ = '2.0.0'
import deepchem.data
import deepchem.feat
import deepchem.hyper
import deepchem.metalearning
import deepchem.metrics
import deepchem.models
import deepchem.splits
import deepchem.trans
import deepchem.utils
import deepchem.dock
import deepchem.molnet
import deepchem.rl
| {
"content_hash": "996655aeede1b1557fcb75a23c3c8b56",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 39,
"avg_line_length": 19.85,
"alnum_prop": 0.8035264483627204,
"repo_name": "Agent007/deepchem",
"id": "b3cc75b1ac49a22d4f1a3dbc371494ed021661be",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepchem/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16453"
},
{
"name": "HTML",
"bytes": "20618"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "Python",
"bytes": "2129306"
},
{
"name": "Shell",
"bytes": "11976"
}
],
"symlink_target": ""
} |
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm, RenameForm
@auth.before_app_request
def before_request():
    """Run before every request: refresh activity and gate unconfirmed users.

    Records the user's last-seen time, then redirects authenticated but
    unconfirmed users to the confirmation page for any endpoint outside
    the auth blueprint (static files excluded).
    """
    if current_user.is_authenticated:
        current_user.ping()
        # request.endpoint is None when no route matched (e.g. a 404);
        # the original slicing of None raised TypeError in that case.
        if (not current_user.confirmed
                and request.endpoint is not None
                and request.endpoint[:5] != 'auth.'
                and request.endpoint != 'static'):
            return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
    """Show the 'please confirm your account' page.

    Anonymous or already-confirmed users have nothing to confirm, so they
    are sent back to the home page instead.
    """
    needs_confirmation_page = not (current_user.is_anonymous
                                   or current_user.confirmed)
    if needs_confirmation_page:
        return render_template('auth/unconfirmed.html')
    return redirect(url_for('main.index'))
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in via email/password.

    Honours the ``next`` query parameter, but only for relative URLs so
    the endpoint cannot be abused as an open redirect to an external site.
    """
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            next_url = request.args.get('next')
            # Security: reject absolute URLs ("http://evil.example/...")
            # supplied by the client to avoid an open-redirect hole.
            if not next_url or not next_url.startswith('/'):
                next_url = url_for('main.index')
            return redirect(next_url)
        flash('Invalid username or password.')
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
    """Terminate the current session and return to the home page."""
    logout_user()
    flash('You have been logged out.')
    home = url_for('main.index')
    return redirect(home)
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and email a confirmation token."""
    form = RegistrationForm()
    if not form.validate_on_submit():
        # GET request or failed validation: (re-)render the form.
        return render_template('auth/register.html', form=form)
    new_user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
    db.session.add(new_user)
    db.session.commit()
    confirmation_token = new_user.generate_confirmation_token()
    send_email(new_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=new_user, token=confirmation_token)
    flash('A confirmation email has been sent to you by email.')
    return redirect(url_for('auth.login'))
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Validate an emailed confirmation token for the logged-in user."""
    if not current_user.confirmed:
        if current_user.confirm(token):
            flash('You have confirmed your account. Thanks!')
        else:
            flash('The confirmation link is invalid or has expired.')
    return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Email a fresh confirmation token to the current user."""
    fresh_token = current_user.generate_confirmation_token()
    send_email(current_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=current_user, token=fresh_token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Change the logged-in user's password.

    Requires the current password before accepting the new one.  The
    session is committed explicitly — as register() does — so the new
    password hash is actually persisted.
    """
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.old_password.data):
            current_user.password = form.password.data
            db.session.add(current_user)
            # Was missing: without a commit the updated hash could be
            # discarded at the end of the request.
            db.session.commit()
            flash('Your password has been updated.')
            return redirect(url_for('main.index'))
        else:
            flash('Invalid password.')
    return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
    """Ask for a password-reset email.

    The confirmation flash is shown whether or not the address matched an
    account — same observable behaviour as before.
    """
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetRequestForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account:
            reset_token = account.generate_reset_token()
            send_email(account.email, 'Reset Your Password',
                       'auth/email/reset_password',
                       user=account, token=reset_token,
                       next=request.args.get('next'))
        flash('An email with instructions to reset your password has been '
              'sent to you.')
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Set a new password using an emailed reset token."""
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetForm()
    if not form.validate_on_submit():
        return render_template('auth/reset_password.html', form=form)
    account = User.query.filter_by(email=form.email.data).first()
    if account is None:
        return redirect(url_for('main.index'))
    if account.reset_password(token, form.password.data):
        flash('Your password has been updated.')
        return redirect(url_for('auth.login'))
    return redirect(url_for('main.index'))
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Start an email-address change by mailing a confirmation token."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if not current_user.verify_password(form.password.data):
            flash('Invalid email or password.')
        else:
            pending_email = form.email.data
            change_token = current_user.generate_email_change_token(pending_email)
            send_email(pending_email, 'Confirm your email address',
                       'auth/email/change_email',
                       user=current_user, token=change_token)
            flash('An email with instructions to confirm your new email '
                  'address has been sent to you.')
            return redirect(url_for('main.index'))
    return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
    """Finish an email change by applying a previously mailed token."""
    message = ('Your email address has been updated.'
               if current_user.change_email(token)
               else 'Invalid request.')
    flash(message)
    return redirect(url_for('main.index'))
@auth.route('/rename', methods=['GET', 'POST'])
@login_required
def rename():
    """Change the logged-in user's username.

    Requires the account password.  The session is committed explicitly —
    as register() does — so the new name is actually persisted.
    """
    form = RenameForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.password.data):
            current_user.username = form.new_name.data
            db.session.add(current_user)
            # Was missing: without a commit the rename could be discarded
            # at the end of the request.
            db.session.commit()
            flash('Rename Succeed!')
            return redirect(url_for('main.index'))
        else:
            flash('Invalid password.')
    return render_template("auth/rename.html", form=form)
| {
"content_hash": "34d980827e674615b4be816e0209ba9e",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 78,
"avg_line_length": 36.58659217877095,
"alnum_prop": 0.6367384333486028,
"repo_name": "The-end-novel/Freedom-Web",
"id": "903e4f3e802a8949c9e86d4b0f94e19d836f2071",
"size": "6549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2815"
},
{
"name": "HTML",
"bytes": "18503"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "62142"
}
],
"symlink_target": ""
} |
# Prefer MicroPython's 'uio' module; fall back to CPython's 'io'.
try:
    import uio as io
except ImportError:
    try:
        import io
    except ImportError:
        # Neither implementation exists on this port: signal the test
        # runner to skip rather than fail.
        print('SKIP')
        raise SystemExit

# test __enter__/__exit__
with io.StringIO() as b:
    b.write("foo")
    print(b.getvalue())
| {
"content_hash": "e94d6ab23730c2039cde243bb6a2ca52",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 25,
"avg_line_length": 18.384615384615383,
"alnum_prop": 0.5774058577405857,
"repo_name": "pfalcon/micropython",
"id": "ffc0038a92fb603eb36ba6c3bb4df731631cd010",
"size": "239",
"binary": false,
"copies": "1",
"ref": "refs/heads/pfalcon",
"path": "tests/basics/io_stringio_with.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10582"
},
{
"name": "C",
"bytes": "14095787"
},
{
"name": "C++",
"bytes": "588783"
},
{
"name": "CMake",
"bytes": "876"
},
{
"name": "JavaScript",
"bytes": "5792"
},
{
"name": "Makefile",
"bytes": "153731"
},
{
"name": "Objective-C",
"bytes": "7411"
},
{
"name": "Python",
"bytes": "1060906"
},
{
"name": "Shell",
"bytes": "16846"
}
],
"symlink_target": ""
} |
"""
polaris.auth
~~~~~~~~~~~~
:copyright: (c) 2013 Eleme, http://polaris.eleme.io
:license: MIT
Polaris user management.
"""
| {
"content_hash": "906893848a1712bdc3321e6cee6be189",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 55,
"avg_line_length": 16.333333333333332,
"alnum_prop": 0.5374149659863946,
"repo_name": "eleme/polaris",
"id": "9544e87b44522e4734db631b9a33c233068ed9aa",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polaris/auth/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19223"
},
{
"name": "JavaScript",
"bytes": "73762"
},
{
"name": "Python",
"bytes": "115563"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.misc as sp
import matplotlib.pyplot as plt
class GeologicalModelling:
    """
    Class for creating simple parametric geological models

    inputs:
    ------
    type:   'Layered', 'Trap', 'Fault'...
    dims:   Dimensions of model [nz, nx]

    functions:
    ----------

    Returns: a GeologicalModelling object
    """
    def __init__(self, par):
        # par is a dict with at least 'type' and 'dims' ([nz, nx]) keys.
        self.type = par['type']
        self.nz = par['dims'][0]
        self.nx = par['dims'][1]

    def Visualize(self, figsize=(12, 7)):
        """
        Visualize velocity (V) and density (Rho) models side by side.

        :param figsize: Figure size
        """
        fig, ax = plt.subplots(1, 2, figsize=figsize)
        # imshow returns an AxesImage handle; it was assigned to an unused
        # variable before — the handle is not needed here.
        ax[0].imshow(self.V, cmap='gray')
        ax[0].set_title('V')
        ax[0].axis('tight')
        ax[1].imshow(self.Rho)
        ax[1].set_title('Rho')
        ax[1].axis('tight')

    def Save(self, filepath='./', filename='test', normV=1, normRho=1):
        """
        Save models in png files

        :param filepath: Path to save .png files
        :param filename: Prefix to give to .png files
        :param normV:    Normalisation factor applied to V before 8-bit scaling
        :param normRho:  Normalisation factor applied to Rho before 8-bit scaling
        """
        # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2; on a
        # modern SciPy this needs PIL.Image.fromarray or imageio instead.
        V = (self.V / normV) * 255.
        sp.toimage(V, cmin=0, cmax=255).save(filepath + filename + '_V.png')
        Rho = (self.Rho / normRho) * 255.
        sp.toimage(Rho, cmin=0, cmax=255).save(filepath + filename + '_Rho.png')
class LayeredModel(GeologicalModelling):
    """
    Class for creating layered model

    Returns: a GeologicalModelling object
    """
    def __init__(self, par):
        par['type'] = 'layer'
        GeologicalModelling.__init__(self, par)

    def Deterministic(self, ints, v, rho=np.array([])):
        """
        Create layered model given deterministic parametric definition

        :param ints: Size of intervals
        :param v:    Velocity of intervals
        :param rho:  Density of intervals (defaults to a constant 1000)
        """
        self.int = ints
        self.v = v
        if len(rho) == 0:
            self.rho = 1000 * np.ones(len(self.int))
        else:
            self.rho = rho

    def Stochastic(self, nint, dv, drho=[], dint=[]):
        """
        Create layered model given stochastic parametric definition

        :param nint: Number of intervals
        :param dv:   Range of velocities [vmin, vmax] drawn uniformly
        :param drho: Range of densities [rhomin, rhomax] drawn uniformly
        :param dint: Range of reflector depths [intmin, intmax] drawn uniformly
        """
        if len(dint) == 0:
            dint = [0, self.nz]

        # draw reflector depths and convert them to interval thicknesses
        nrefl = nint - 1
        refl = np.floor(np.random.uniform(dint[0], dint[1], nrefl)) + 1
        refl = np.sort(refl)
        # np.int was removed in NumPy >= 1.24, and np.int(np.diff(refl))
        # raised TypeError for nint > 3 (multi-element array to scalar);
        # use the builtin int and implicit casting into the int array.
        self.int = np.zeros(nint, dtype=int)
        self.int[0] = int(refl[0])
        self.int[1:-1] = np.diff(refl)
        self.int[-1] = int(self.nz - refl[-1])

        # draw velocity and density
        self.v = np.round(np.random.uniform(dv[0], dv[1], nint))
        if len(drho) == 0:
            self.rho = 1000 * np.ones(nint)
        else:
            self.rho = np.round(np.random.uniform(drho[0], drho[1], nint))

    def Apply(self):
        """
        Build the (nz, nx) V and Rho grids from the 1D layer definition.
        """
        v = self.v[0] * np.ones(self.int[0])
        rho = self.rho[0] * np.ones(self.int[0])
        for iint in range(1, len(self.int)):
            v = np.hstack((v, self.v[iint] * np.ones(self.int[iint])))
            rho = np.hstack((rho, self.rho[iint] * np.ones(self.int[iint])))
        self.V = np.repeat(v[:, np.newaxis], self.nx, axis=1)
        self.Rho = np.repeat(rho[:, np.newaxis], self.nx, axis=1)
class DippingModel(GeologicalModelling):
    """
    Class for creating dipping layered model

    Returns: a GeologicalModelling object
    """
    def __init__(self, par):
        par['type'] = 'dipping'
        GeologicalModelling.__init__(self, par)
        self.flip = False

    def Deterministic(self, ints, p, vback, dv, rhoback=1000, drho=np.array([])):
        """
        Create dipping model given deterministic parametric definition

        :param ints:    Size of intervals (at x=0)
        :param p:       Slopes
        :param vback:   Background velocity
        :param dv:      Changes in velocity
        :param rhoback: Background density
        :param drho:    Changes in density
        """
        self.int = ints
        self.vback = vback
        self.rhoback = rhoback
        self.p = p
        self.dv = dv
        if len(drho) == 0:
            self.drho = 1000 * np.ones(len(self.int))
        else:
            self.drho = drho

    def Stochastic(self, nint, p, vback, dv, rhoback=[1000,1000], drho=np.array([]), dint=[], flip=True):
        """
        Create dipping model given stochastic parametric definition

        :param nint:    Number of intervals
        :param p:       Range of slopes [pmin, pmax]
        :param vback:   Range of background velocity [vmin, vmax]
        :param dv:      Range of velocity changes [dvmin, dvmax]
        :param rhoback: Range of background density [rhomin, rhomax]
        :param drho:    Range of density changes [drhomin, drhomax]
        :param dint:    Range of reflector depths [intmin, intmax]
        :param flip:    Randomly mirror the model left-right in Apply()
        """
        if len(dint) == 0:
            dint = [0, self.nz]

        # draw reflector depths and convert them to interval thicknesses
        nrefl = nint - 1
        refl = np.floor(np.random.uniform(dint[0], dint[1], nrefl)) + 1
        refl = np.sort(refl)
        # np.int was removed in NumPy >= 1.24; also np.int() on the diff
        # array raised TypeError for nint > 3.
        self.int = np.zeros(nint, dtype=int)
        self.int[0] = int(refl[0])
        self.int[1:-1] = np.diff(refl)
        self.int[-1] = int(self.nz - refl[-1])

        # draw dips (sorted so deeper layers dip more steeply)
        dips = np.random.uniform(p[0], p[1], nint)
        self.p = np.sort(dips)

        # draw velocity and density backgrounds
        self.vback = np.round(np.random.uniform(vback[0], vback[1]))
        # BUG FIX: the density draw previously overwrote self.vback,
        # leaving self.rhoback unset and the velocity background drawn
        # from the density range.
        self.rhoback = np.round(np.random.uniform(rhoback[0], rhoback[1]))
        self.dv = np.round(np.random.uniform(dv[0], dv[1], nint))
        if len(drho) == 0:
            self.drho = 1000 * np.ones(nint)
        else:
            self.drho = np.round(np.random.uniform(drho[0], drho[1], nint))
        self.flip = flip

    def Apply(self):
        """
        Apply dipping layers modelling: accumulate each layer's velocity
        and density perturbation below its dipping interface.
        """
        self.V = self.vback * np.ones((self.nz, self.nx))
        # BUG FIX: the density background previously reused self.vback.
        self.Rho = self.rhoback * np.ones((self.nz, self.nx))
        intercepts = np.cumsum(self.int)
        for iint in range(len(self.int)):
            V = np.zeros((self.nz, self.nx))
            Rho = np.zeros((self.nz, self.nx))
            for ix in range(self.nx):
                intercept = int(np.round(intercepts[iint] + ix * self.p[iint]))
                V[intercept:, ix] = self.dv[iint]
                Rho[intercept:, ix] = self.drho[iint]
            self.V = self.V + V
            self.Rho = self.Rho + Rho
        if self.flip:
            # 50/50 chance of mirroring so dips go both ways in a dataset
            if np.random.rand() > 0.5:
                self.V = np.fliplr(self.V)
                self.Rho = np.fliplr(self.Rho)
class FaultModel(GeologicalModelling):
    """
    Class for creating fault model

    Returns: a GeologicalModelling object
    """
    def __init__(self, par):
        par['type'] = 'fault'
        GeologicalModelling.__init__(self, par)
        self.flip = False

    def Deterministic(self, ints, v, rho=np.array([]), faultlim=np.array([]), offset=1):
        """
        Create fault model given deterministic parametric definition

        :param ints:     Size of intervals
        :param v:        Velocity of intervals
        :param rho:      Density of intervals (defaults to a constant 1000)
        :param faultlim: Fault trace x-limits [x at top, x at bottom]
        :param offset:   Vertical throw (in samples) across the fault
        """
        self.int = ints
        self.v = v
        if len(rho) == 0:
            self.rho = 1000 * np.ones(len(self.int))
        else:
            self.rho = rho
        # BUG FIX: this branch re-tested len(rho) (copy-paste error), so the
        # faultlim default was taken whenever a rho array was supplied.
        if len(faultlim) == 0:
            self.faultlim = np.array((0, self.nx - 1))
        else:
            self.faultlim = faultlim
        self.offset = offset
        # Fault trace endpoints in pixel coordinates; use self.faultlim so
        # the default above is honoured when faultlim was not supplied
        # (the original indexed the raw argument and crashed on the default).
        x0, z0 = self.faultlim[0], 0
        x1, z1 = self.faultlim[1], self.nz - 1
        # One x-column per row: Apply() indexes self.x[i] for i in
        # range(nz), so the trace needs nz points (was nx).
        self.x = np.linspace(x0, x1, self.nz, dtype=int)
        self.z = np.linspace(z0, z1, self.nz)

    def Stochastic(self, nint, dv, drho=[], dint=[], dfaultlim=[], doffset=[]):
        """
        Create fault model given stochastic parametric definition

        :param nint:      Number of intervals
        :param dv:        Range of velocities [vmin, vmax]
        :param drho:      Range of densities [rhomin, rhomax]
        :param dint:      Range of reflector depths [intmin, intmax]
        :param dfaultlim: Range of fault x-limits [faultlimmin, faultlimmax]
        :param doffset:   Range of offsets [offsetmin, offsetmax]
        """
        if len(dint) == 0:
            dint = [0, self.nz]
        if len(doffset) == 0:
            doffset = [0, self.nz - 1]
        if len(dfaultlim) == 0:
            dfaultlim = [0, self.nx - 1]

        # draw reflector positions (same layering logic as LayeredModel)
        nrefl = nint - 1
        refl = np.random.randint(dint[0], dint[1], nrefl) + 1
        refl = np.sort(refl)
        # np.int was removed in NumPy >= 1.24; use the builtin int dtype.
        self.int = np.zeros(nint, dtype=int)
        self.int[0] = refl[0]
        self.int[1:-1] = np.diff(refl)
        self.int[-1] = self.nz - refl[-1]

        # draw velocity and density
        self.v = np.round(np.random.uniform(dv[0], dv[1], nint))
        if len(drho) == 0:
            self.rho = 1000 * np.ones(nint)
        else:
            self.rho = np.round(np.random.uniform(drho[0], drho[1], nint))

        # fault trace endpoints in pixel coordinates; x1 is drawn to the
        # right of x0 so the trace dips one way
        x0, z0 = np.floor(np.random.uniform(dfaultlim[0], dfaultlim[1])), 0
        # generalized: the bottom row was hard-coded to 99 (nz=100)
        x1, z1 = np.floor(np.random.uniform(x0, dfaultlim[1])), self.nz - 1
        # One x-column per row (see Deterministic): nz points, not nx.
        self.x = np.linspace(x0, x1, self.nz, dtype=int)
        self.z = np.linspace(z0, z1, self.nz)

        # vertical throw across the fault
        self.offset = int(np.floor(np.random.uniform(doffset[0], doffset[1])))

    def Apply(self):
        """
        Build layered V/Rho grids, then shift the right side of the fault
        trace down by self.offset samples.
        """
        v = self.v[0] * np.ones(self.int[0])
        rho = self.rho[0] * np.ones(self.int[0])
        for iint in range(1, len(self.int)):
            v = np.hstack((v, self.v[iint] * np.ones(self.int[iint])))
            rho = np.hstack((rho, self.rho[iint] * np.ones(self.int[iint])))
        self.V = np.repeat(v[:, np.newaxis], self.nx, axis=1)
        self.Rho = np.repeat(rho[:, np.newaxis], self.nx, axis=1)

        # mask0 marks the unshifted side of the fault, mask1 the shifted side
        mask0 = np.ones_like(self.V, dtype=int)
        mask1 = np.ones_like(self.V, dtype=int)
        for i in range(self.nz):
            mask0[i, self.x[i]:] = 0
        mask1 = mask1 - mask0
        # NOTE(review): only V is faulted; Rho keeps the unfaulted layering
        # — confirm this asymmetry is intended.
        self.V = self.V * mask0 + np.roll(self.V, self.offset, 0) * mask1
class WedgeModel(GeologicalModelling):
    """
    Class for creating wedge model

    Returns: a GeologicalModelling object
    """
    def __init__(self, par):
        par['type'] = 'wedge'
        GeologicalModelling.__init__(self, par)
        self.flip = False

    def Stochastic(self, p, vback, dv, rhoback=[1000,1000], drho=np.array([]), flip=True):
        """
        Create wedge model given stochastic parametric definition

        :param p:       Range of wedge slopes [pmin, pmax]
        :param vback:   Range of background velocity [vmin, vmax]
        :param dv:      Range of velocity changes [dvmin, dvmax]
        :param rhoback: Range of background density [rhomin, rhomax]
        :param drho:    Range of density changes [drhomin, drhomax]
        :param flip:    Randomly mirror the model left-right in Apply()
        """
        # depth of the flat top interface of the wedge
        self.hor_intercept = np.random.randint(np.round(0.2 * self.nz), np.round(0.8 * self.nz))
        # x position where the wedge starts opening
        self.start_wedge = np.random.randint(0, np.round(self.nx / 2))
        # draw wedge dip
        self.dip = np.random.uniform(p[0], p[1])
        # draw velocity and density backgrounds
        self.vback = np.round(np.random.uniform(vback[0], vback[1]))
        # BUG FIX: the density draw previously overwrote self.vback,
        # leaving self.rhoback unset and the velocity background drawn
        # from the density range.
        self.rhoback = np.round(np.random.uniform(rhoback[0], rhoback[1]))
        self.dv = np.round(np.random.uniform(dv[0], dv[1], 2))
        if len(drho) == 0:
            self.drho = 1000 * np.ones(2)
        else:
            self.drho = np.round(np.random.uniform(drho[0], drho[1], 2))
        self.flip = flip

    def Apply(self):
        """
        Apply wedge modelling: flat layer left of the wedge start, then a
        wedge opening with slope self.dip, over a second (bottom) layer.
        """
        self.V = self.vback * np.ones((self.nz, self.nx))
        # BUG FIX: the density background previously reused self.vback.
        self.Rho = self.rhoback * np.ones((self.nz, self.nx))
        V = np.zeros((self.nz, self.nx))
        Rho = np.zeros((self.nz, self.nx))
        for ix in range(self.nx):
            if ix < self.start_wedge:
                # left of the wedge: only the bottom layer is present
                V[self.hor_intercept:, ix] = self.dv[1]
                Rho[self.hor_intercept:, ix] = self.drho[1]
            else:
                # inside the wedge: wedge fill above, bottom layer below
                dip_intercept = int(np.round(self.hor_intercept + (ix - self.start_wedge) * self.dip))
                V[self.hor_intercept:dip_intercept, ix] = self.dv[0]
                V[dip_intercept:, ix] = self.dv[1]
                Rho[self.hor_intercept:dip_intercept, ix] = self.drho[0]
                Rho[dip_intercept:, ix] = self.drho[1]
        self.V = self.V + V
        self.Rho = self.Rho + Rho
        if self.flip:
            if np.random.rand() > 0.5:
                self.V = np.fliplr(self.V)
                self.Rho = np.fliplr(self.Rho)
class TrapModel(GeologicalModelling):
    """
    Class for creating trap model

    Returns: a GeologicalModelling object
    """
    def __init__(self, par):
        par['type'] = 'trap'
        GeologicalModelling.__init__(self, par)

    def Deterministic(self, center_x, center_z, v, rho=np.array([]), perc=0):
        """
        Create trap model given deterministic parametric definition

        :param center_x: Position of x-center of trap
        :param center_z: Position of z-center(s) of trap
        :param v:        Velocities [above and inside] traps (len(center_z)+1 values)
        :param rho:      Densities [above and inside] traps
        :param perc:     Percentage of circle to retain in image
        """
        self.center_x = center_x
        self.center_z = center_z
        self.radius = int(np.floor(self.nx / 2 * (1 + perc)))
        self.perc = perc
        self.v = v
        if len(rho) == 0:
            # BUG FIX: the default was a fixed 3 values; Apply() indexes
            # rho[itrap + 1], so len(center_z) + 1 entries are required.
            self.rho = 1000 * np.ones(len(center_z) + 1)
        else:
            self.rho = rho

    def Stochastic(self, nint, center_x, dcenter_z, dv, drho=[], perc=0):
        """
        Create trap model given stochastic parametric definition

        :param nint:      Number of traps
        :param center_x:  Position of x-center of trap
        :param dcenter_z: Range of circle centers [cmin, cmax]
        :param dv:        Range of velocities [vmin, vmax]
        :param drho:      Range of densities [rhomin, rhomax]
        :param perc:      Percentage of circle to retain in image
        """
        self.center_x = center_x
        self.radius = int(np.floor(self.nx / 2 * (1 + perc)))
        self.perc = perc
        # draw trap center depths
        centers = np.floor(np.random.uniform(dcenter_z[0], dcenter_z[1], nint)) + 1
        self.center_z = np.sort(centers)
        # draw velocity and density (nint traps + 1 background value)
        self.v = np.round(np.random.uniform(dv[0], dv[1], nint + 1))
        if len(drho) == 0:
            # BUG FIX: was np.ones(nint), one entry short for Apply()'s
            # rho[itrap + 1] lookup on the last trap.
            self.rho = 1000 * np.ones(nint + 1)
        else:
            self.rho = np.round(np.random.uniform(drho[0], drho[1], nint + 1))

    def Apply(self):
        """
        Build V/Rho grids containing the circular trap(s): fill everything
        below the upper arc of each circle with that trap's values, then
        crop the lateral padding back to nx columns.
        """
        width = self.nx + int(self.nx * self.perc)
        self.V = np.zeros([self.nz, width])
        self.Rho = np.zeros([self.nz, width])
        # background above/around the traps
        self.V[:, :] = self.v[0]
        self.Rho[:, :] = self.rho[0]
        # create trap(s)
        for itrap in range(len(self.center_z)):
            for ix in range(self.center_x - self.radius, self.center_x + self.radius):
                # second root of (z - cz)^2 + (x - cx)^2 = r^2 for this column
                y = np.roots([1, -2 * self.center_z[itrap],
                              self.center_z[itrap] ** 2 + (ix - self.center_x) ** 2 - self.radius ** 2])[1]
                if np.imag(y) != 0:
                    y = np.real(y)
                if y < 0:
                    y = 0
                self.V[int(y):, ix] = self.v[itrap + 1]
                self.Rho[int(y):, ix] = self.rho[itrap + 1]
        # crop the lateral padding back to nx columns; use // so the slice
        # bounds stay integers under Python 3 (nx/2 is a float there and
        # raised TypeError when perc > 0)
        if self.perc > 0:
            if np.mod(self.nx, 2) == 0:
                half = self.nx // 2
                self.V = self.V[:, self.center_x - half:self.center_x + half]
                self.Rho = self.Rho[:, self.center_x - half:self.center_x + half]
            else:
                half = (self.nx - 1) // 2
                self.V = self.V[:, self.center_x - half:self.center_x + half + 1]
                self.Rho = self.Rho[:, self.center_x - half:self.center_x + half + 1]
# Demo / smoke tests: build one model of each kind (deterministic and
# stochastic variants), display it and, for the layered models, save PNGs.
if __name__ == "__main__":
    filepath = '../datasets/seismic/synthetics/'
    # Test deterministic method - layered model
    ints = np.array([10, 20, 50, 10])
    v = np.array([1500, 1800, 2000, 2500])
    rho = np.array([1000, 1800, 1400, 1200])
    Layers = LayeredModel({'dims': [np.sum(ints), 100], 'type': 'layer'})
    Layers.Deterministic(ints,v,rho)
    Layers.Apply()
    Layers.Visualize(figsize=(12, 7))
    filename='layer'
    Layers.Save(filepath=filepath, filename=filename)
    # Test stochastic method - layered model
    dv = [1500,2000]
    drho = [1000,1800]
    nint = 3
    Layers = LayeredModel({'dims': [100, 100], 'type': 'layer'})
    Layers.Stochastic(nint,dv,drho)
    Layers.Apply()
    Layers.Visualize(figsize=(12, 7))
    filename = 'layerrdn'
    Layers.Save(filepath=filepath, filename=filename)
    # Test deterministic method - dipping model
    vback = 1500
    rhoback = 1000
    ints = np.array([10, 20, 50, 10])
    p = np.array([0, 0.1, 0.3, -0.1])
    dv = np.array([150, 100, 200, 50])
    drho = np.array([100, 180, 10, 120])
    Layers = DippingModel({'dims': [np.sum(ints), 100], 'type': 'dipping'})
    Layers.Deterministic(ints, p, vback, dv, rhoback, drho)
    Layers.Apply()
    Layers.Visualize(figsize=(12, 7))
    # Test stochastic method - dipping model
    vback = [1500,1800]
    rhoback = [1000,1200]
    nint = 3
    p = [0,0.2]
    dv = [-150,150]
    drho = [-100,100]
    Layers = DippingModel({'dims': [np.sum(ints), 100], 'type': 'dipping'})
    Layers.Stochastic(nint, p, vback, dv, rhoback, drho, flip=True)
    Layers.Apply()
    Layers.Visualize(figsize=(12, 7))
    # Test deterministic method - trap model
    perc = 0
    center_x = 50
    center_z = np.array([50, 70, 90, 110])
    v = np.array([1500, 2000, 1500, 1800, 2000])
    rho = np.array([1000, 1200, 1400, 1500, 1700])
    Trap = TrapModel({'dims': [100, 100], 'type': 'trap'})
    Trap.Deterministic(center_x, center_z, v, rho, perc)
    Trap.Apply()
    Trap.Visualize(figsize=(12, 7))
    # Test stochastic method - trap model
    perc = 0
    nint = 3
    center_x = 50
    dcenter_z = [20, 80]
    dv = [1500, 2000]
    drho = [1000, 1800]
    Trap = TrapModel({'dims': [100, 100], 'type': 'trap'})
    Trap.Stochastic(nint, center_x, dcenter_z, dv, drho, perc=0)
    Trap.Apply()
    Trap.Visualize(figsize=(12, 7))
    # Show all figures at once at the end of the demo.
    plt.show()
| {
"content_hash": "1d993750951d056aa25a3e21151c6a97",
"timestamp": "",
"source": "github",
"line_count": 695,
"max_line_length": 121,
"avg_line_length": 29.7568345323741,
"alnum_prop": 0.5480392630917267,
"repo_name": "mrava87/EAGE_Hackatoon_2017",
"id": "56cff613432d41a11ed4daffb04345ebfc4f58e8",
"size": "20681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seispy/GeologicalModelling.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "346088"
},
{
"name": "HTML",
"bytes": "28096"
},
{
"name": "JavaScript",
"bytes": "101592"
},
{
"name": "Python",
"bytes": "42146"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
import octavia.common.config as config
import octavia.tests.unit.base as base
class TestConfig(base.TestCase):
    """Sanity checks for octavia.common.config."""
    def test_sanity(self):
        # config.init with an empty argv and wiring up logging must both
        # succeed without raising.
        config.init([])
        config.setup_logging(cfg.CONF)
| {
"content_hash": "56c0bcb8afb859e4b2433be9e54a0201",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 38,
"avg_line_length": 21.181818181818183,
"alnum_prop": 0.7167381974248928,
"repo_name": "brandonlogan/octavia",
"id": "fcc01ee689c3f21bcb53d4d653f8c0043f15949e",
"size": "856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octavia/tests/unit/common/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "325983"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
[extensions]
todo_include_todos=True
# auto numbering for HTML and LaTeX builder.
# see: http://docs.sphinx-users.jp/markup/inline.html#cross-referencing-figures-by-figure-number
# see: http://docs.sphinx-users.jp/config.html#confval-numfig
numfig = True
numfig_format = {
'figure' : u'図 - %s',
'table' : u'表 - %s',
'code-block' : u'コード - %s',
}
numfig_secnum_depth = 2
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'XlsMapper'
copyright = u'2015-2022, T.TSUCHIE'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.2'
# The full version, including alpha/beta/rc tags.
release = '2.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
language = 'ja'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
#html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
'collapse_navigation': False,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
#html_theme_path = ["_themes", ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
#html_sidebars = {
# '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'],
#}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# (Base name for the files produced by the "htmlhelp" build target.)
htmlhelp_basename = 'XlsMapperdoc'
# -- Options for LaTeX output ---------------------------------------------

# LaTeX build customization; everything is left at the Sphinx defaults here.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'XlsMapper.tex', u'XlsMapper Documentation',
     u'T.TSUCHIE', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Manual section 1 is the conventional section for user commands.
man_pages = [
    ('index', 'xlsmapper', u'XlsMapper Documentation',
     [u'T.TSUCHIE'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE: 'One line description of project.' is the sphinx-quickstart
# placeholder text, left unchanged.
texinfo_documents = [
    ('index', 'XlsMapper', u'XlsMapper Documentation',
     u'T.TSUCHIE', 'XlsMapper', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}

# -- Options for manual search ---------------------------------------
# Japanese full-text search: tokenize with the Janome splitter instead of
# the default single-character splitter.
html_search_language = 'ja'
html_search_options = {'type': 'sphinx.search.ja.JanomeSplitter'}
| {
"content_hash": "6fff55ff96e9e732e9983e063692c882",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 96,
"avg_line_length": 31.750865051903116,
"alnum_prop": 0.6746948561464691,
"repo_name": "mygreen/xlsmapper",
"id": "436e074c9aff30d65070d6867830f27441e7ae6c",
"size": "9620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/site/sphinx/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2286"
},
{
"name": "CSS",
"bytes": "935"
},
{
"name": "HTML",
"bytes": "1952"
},
{
"name": "Java",
"bytes": "3597495"
},
{
"name": "JavaScript",
"bytes": "967"
},
{
"name": "Makefile",
"bytes": "638"
},
{
"name": "Python",
"bytes": "9620"
},
{
"name": "Shell",
"bytes": "817"
}
],
"symlink_target": ""
} |
from pyowm.commons import exceptions
from pyowm.utils import formatting, timestamps
from pyowm.weatherapi25 import location
def uv_intensity_to_exposure_risk(uv_intensity):
    """Map a UV index value to its exposure-risk label.

    Bands follow the figures in https://en.wikipedia.org/wiki/Ultraviolet_index
    Values of 10.9 and above (and, as in the original, values below 0) are
    reported as 'extreme'.
    """
    # (exclusive upper bound, label) pairs, in ascending order.
    risk_bands = (
        (2.9, 'low'),
        (5.9, 'moderate'),
        (7.9, 'high'),
        (10.9, 'very high'),
    )
    if uv_intensity >= 0.0:
        for upper_bound, label in risk_bands:
            if uv_intensity < upper_bound:
                return label
    return 'extreme'
class UVIndex:
    """
    A class representing the UltraViolet Index observed in a certain location
    in the world. The location is represented by the encapsulated *Location* object.

    :param reference_time: GMT UNIXtime telling when the UV data have been measured
    :type reference_time: int
    :param location: the *Location* relative to this UV observation
    :type location: *Location*
    :param value: the observed UV intensity value
    :type value: float
    :param reception_time: GMT UNIXtime telling when the observation has
        been received from the OWM Weather API
    :type reception_time: int
    :returns: an *UVIndex* instance
    :raises: *ValueError* when negative values are provided as reference time,
        reception time or UV intensity value
    """

    def __init__(self, reference_time, location, value, reception_time):
        # Zero is a legal value for all numeric fields; only negatives are
        # rejected (the original messages wrongly said "greater than 0" and
        # misspelled the parameter names).
        if reference_time < 0:
            raise ValueError("'reference_time' must be greater than or equal to 0")
        self.ref_time = reference_time
        self.location = location
        if value < 0.0:
            raise ValueError("'value' (UV intensity) must be greater than or equal to 0")
        self.value = value
        if reception_time < 0:
            raise ValueError("'reception_time' must be greater than or equal to 0")
        self.rec_time = reception_time

    def reference_time(self, timeformat='unix'):
        """
        Returns the GMT time telling when the UV has been observed
        from the OWM Weather API

        :param timeformat: the format for the time value. May be:
            '*unix*' (default) for UNIX time
            '*iso*' for ISO8601-formatted string in the format ``YYYY-MM-DD HH:MM:SS+00``
            '*date* for ``datetime.datetime`` object instance
        :type timeformat: str
        :returns: an int or a str
        :raises: ValueError when negative values are provided
        """
        return formatting.timeformat(self.ref_time, timeformat)

    def reception_time(self, timeformat='unix'):
        """
        Returns the GMT time telling when the UV has been received from the API

        :param timeformat: the format for the time value. May be:
            '*unix*' (default) for UNIX time
            '*iso*' for ISO8601-formatted string in the format ``YYYY-MM-DD HH:MM:SS+00``
            '*date* for ``datetime.datetime`` object instance
        :type timeformat: str
        :returns: an int or a str
        :raises: ValueError when negative values are provided
        """
        return formatting.timeformat(self.rec_time, timeformat)

    def get_exposure_risk(self):
        """
        Returns a string stating the risk of harm from unprotected sun exposure
        for the average adult on this UV observation

        :return: str
        """
        return uv_intensity_to_exposure_risk(self.value)

    @classmethod
    def from_dict(cls, the_dict):
        """
        Parses an *UVIndex* instance out of raw JSON data. Only certain properties of the data are used: if these
        properties are not found or cannot be parsed, an error is issued.

        :param the_dict: the input dict
        :type the_dict: dict
        :returns: an *UVIndex* instance or ``None`` if no data is available
        :raises: *ParseAPIResponseError* if it is impossible to find or parse the
            data needed to build the result, *APIResponseError* if the input dict embeds an HTTP status error
        """
        if the_dict is None:
            raise exceptions.ParseAPIResponseError('Data is None')
        try:
            # -- reference time
            reference_time = the_dict['date']
            # -- reception time (now)
            reception_time = timestamps.now('unix')
            # -- location
            lon = float(the_dict['lon'])
            lat = float(the_dict['lat'])
            place = location.Location(None, lon, lat, None)
            # -- UV intensity
            uv_intensity = float(the_dict['value'])
        except (KeyError, TypeError, ValueError):
            # Missing keys AND non-numeric lon/lat/value are both parse
            # failures per the documented contract (the original only caught
            # KeyError, letting float() errors escape).
            raise exceptions.ParseAPIResponseError(''.join([__name__, ': impossible to parse UV Index']))
        return cls(reference_time, place, uv_intensity, reception_time)

    def to_dict(self):
        """Dumps object to a dictionary

        :returns: a `dict`
        """
        return {"reference_time": self.ref_time,
                "location": self.location.to_dict(),
                "value": self.value,
                "reception_time": self.rec_time}

    def __repr__(self):
        return "<%s.%s - reference time=%s, reception time=%s, location=%s, " \
               "value=%s>" % (
                    __name__,
                    self.__class__.__name__,
                    self.reference_time('iso'),
                    self.reception_time('iso'),
                    str(self.location),
                    str(self.value))
| {
"content_hash": "83f994c6b9c7ed2af074e7df3d97836d",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 113,
"avg_line_length": 37.34965034965035,
"alnum_prop": 0.6041939711664482,
"repo_name": "csparpa/pyowm",
"id": "f84403c70c0b35a5f06401f2bea8f1cd26115243",
"size": "5388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyowm/uvindexapi30/uvindex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6699"
},
{
"name": "Makefile",
"bytes": "6758"
},
{
"name": "Python",
"bytes": "1045787"
},
{
"name": "Shell",
"bytes": "6424"
}
],
"symlink_target": ""
} |
from PyQt4 import QtGui
class ModuleModel(QtGui.QStandardItemModel):
    """Qt item model of forensic modules stored in the MODULE table.

    Wraps a DB-API connection (SQLite-style '?' placeholders) and exposes
    helpers to list, describe, create and delete modules together with
    their per-module message tables (<NAME>_MSG).
    """

    def __init__(self, parent, dbConnection):
        super(ModuleModel, self).__init__(parent)
        # Shared DB-API connection; a fresh cursor is created per operation.
        self.dbCon = dbConnection

    def populate(self):
        # Reload the model: one non-editable, selectable row per module name.
        self.clear()
        cur = self.dbCon.cursor()
        cur.execute("SELECT NAME FROM MODULE")
        rows = cur.fetchall()
        for row in rows:
            item = QtGui.QStandardItem()
            item.setEditable(False)
            item.setSelectable(True)
            item.setText(row[0])
            self.appendRow(item)
        self.dbCon.commit()

    def fetchModuleDescription(self, name):
        # Returns the DESCRIPTION column for the named module.
        # Assumes the module exists: raises IndexError otherwise.
        cur = self.dbCon.cursor()
        cur.execute("SELECT DESCRIPTION FROM MODULE WHERE NAME = ?", (str(name),))
        rows = cur.fetchall()
        self.dbCon.commit()
        return rows[0][0]

    def populateUnprocessedModules(self, imageHash):
        # Rebuild the model showing every module; modules that already have
        # messages for this dump are shown checked and locked, the rest are
        # user-checkable.
        self.clear()
        cur = self.dbCon.cursor()
        cur.execute("SELECT NAME FROM MODULE")
        rows = cur.fetchall()
        all_modules = []
        for row in rows:
            all_modules.append(row[0])
        processed_modules = []
        for module in all_modules:
            # Module/table names cannot be bound as '?' parameters; they are
            # interpolated directly (safe only if module names are trusted).
            query = "SELECT ID FROM " + module + "_MSG WHERE DUMP_HASH=?"
            cur.execute(query, (str(imageHash),))
            r = cur.fetchall()
            if(len(r) > 0):
                if(module not in processed_modules):
                    processed_modules.append(module)
        for m in processed_modules:
            print "[processed_modules] " + m
        for m in all_modules:
            item = QtGui.QStandardItem()
            item.setEditable(False)
            item.setSelectable(False)
            item.setCheckable(True)
            item.setText(m)
            if(m in processed_modules):
                # NOTE(review): setCheckState expects a Qt.CheckState; True
                # maps to 1 (Qt.PartiallyChecked), not Qt.Checked (2) —
                # presumably Qt.Checked was intended; confirm against the UI.
                item.setCheckState(True)
                item.setCheckable(False)
            self.appendRow(item)
        self.dbCon.commit()

    def insertModule(self, name, description, table_fields):
        # Registers a new module and creates its <name>_MSG message table.
        cur = self.dbCon.cursor()
        cur.execute("INSERT INTO MODULE(NAME, DESCRIPTION) VALUES (?, ?)",(name, description))
        # NOTE(review): name/table_fields are concatenated into DDL — SQL
        # injection risk if they ever come from untrusted input; confirm
        # callers validate them.
        query = "CREATE TABLE " + name + "_MSG(\
            ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\
            DUMP_HASH BINARY(32) NOT NULL,\
            CASE_NAME TEXT NOT NULL,"
        for field in table_fields:
            query += field + " TEXT NOT NULL,"
        query += "FOREIGN KEY (DUMP_HASH) REFERENCES IMAGE(DUMP_HASH),\
            FOREIGN KEY (CASE_NAME) REFERENCES FCASE(NAME))"
        cur.execute(query)
        self.dbCon.commit()

    def deleteModule(self, row, name):
        # Removes the module row and drops its message table.
        # NOTE(review): the 'row' parameter is unused here.
        cur = self.dbCon.cursor()
        cur.execute("DELETE FROM MODULE WHERE NAME=?", (str(name),))
        query = "DROP TABLE IF EXISTS " + str(name) + "_MSG"
        cur.execute(query)
        self.dbCon.commit()
| {
"content_hash": "5b80167e2e15e9f5b06cb0bd3bcd81b4",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 88,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.6526315789473685,
"repo_name": "tiagolb/CSF",
"id": "08884545a0e8475136df4a149b00faed479089be",
"size": "2470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/models/moduleModel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96423"
}
],
"symlink_target": ""
} |
import hashlib
from django.db import models
from django.contrib.auth.models import Group, User
from django.utils.translation import ugettext_lazy as _
from domain.models import Domain
from reporters.models import Reporter, ReporterGroup
from hq.processor import REGISTRATION_XMLNS, create_phone_user
import xformmanager.xmlrouter as xmlrouter
class OrganizationType(models.Model):
    """Lookup table categorizing organizations within a domain."""
    # Globally unique, human-readable type name.
    name = models.CharField(max_length=64, unique=True)
    domain = models.ForeignKey(Domain)
    description = models.CharField(max_length=255, null=True, blank=True)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = _("Organization Type")
class ReporterProfile(models.Model):
    '''The profile for a reporter object. For attaching some of our
    own metadata-type information to RapidSMS reporters. This is
    loosely modeled on how django user profiles work.'''
    # One profile per RapidSMS reporter.
    reporter = models.ForeignKey(Reporter, unique=True, related_name="profile")
    chw_id = models.CharField(max_length=32, null=True, blank=True, help_text="integer id")
    chw_username = models.CharField(max_length=32, null=True, blank=True, help_text="chw_username in the app")
    domain = models.ForeignKey(Domain)
    #dmyung - we will probably need to get rid of this unless there's a really compelling reason
    organization = models.ForeignKey("Organization", null=True, blank=True)

    # an optional email field to the reporter ( tester)
    e_mail = models.EmailField(null=True, blank=True)

    # todo: eventually make these non-null.
    guid = models.CharField(max_length=32, null=True, blank=True)
    approved = models.BooleanField(default=False)
    active = models.BooleanField(default=False)

    @property
    def report_identity(self):
        # Prefer the in-app username; fall back to the display string.
        if self.chw_username:
            return self.chw_username
        return str(self)

    @property
    def language(self):
        # Delegates to the underlying RapidSMS reporter.
        return self.reporter.language

    def send_message(self, router, msg):
        # Thin pass-through to the reporter's messaging.
        return self.reporter.send_message(router, msg)

    def __unicode__(self):
        if self.chw_username:
            return "%s (%s)" % (self.chw_username, self.chw_id)
        else:
            return str(self.reporter)

    def __str__(self):
        # Python 2 str(): encode the unicode representation.
        return unicode(self).encode('utf-8')
###### 02-11-2010 CZUE: Killing ExtUser but temporarily leaving around #######
###### In commented form for reference #######
#class ExtUser(User):
# '''Extended users, which have some additional metadata associated with them'''
#
# # these have been moved to the ReporterProfile object and
# # should be removed when data migration gets sorted out
# chw_id = models.CharField(max_length=32, null=True, blank=True, help_text="integer id")
# chw_username = models.CharField(max_length=32, null=True, blank=True, help_text="chw_username in the app")
#
# # this should be squashed by the reporter foreign key.
# # unfortunately a lot of things currently like pointing at this
# # so it'll stick around temporarily
# primary_phone = models.CharField(max_length=30, null=True, blank=True, help_text="e.g., +251912555555")
#
# domain = models.ForeignKey(Domain)
#
# # also provide org-level granularity
# organization = models.ForeignKey("Organization", null=True, blank=True)
#
# # link to the rapidsms reporter
# reporter = models.ForeignKey(Reporter, null=True, blank=True)
#
# # the value of this field should *always* reflect the value of User.password in an ideal world
# # for now, we allow null values, until such time as we want to reset everyone's password
# unsalted_password = models.CharField(_('password'), max_length = 128, null=True, help_text = \
# _("Use '[hexdigest]' or use the <a href=\"change_password/\">change password form</a>."))
#
# @property
# def report_identity(self):
# if self.chw_username == None:
# return self.__str__()
# else:
# return self.chw_username
#
# def __unicode__(self):
# if self.first_name or self.last_name:
# return "%s %s" % (self.first_name, self.last_name)
# else:
# return self.username
#
# class Meta:
# verbose_name = _("Extended User")
#
# def set_unsalted_password(self, username, password):
# # todo - integrate this with user.password
# self.unsalted_password = hashlib.sha1( username+":"+password ).hexdigest()
class Organization(models.Model):
    # this should be renamed to "Group" if that term wasn't highly
    # overloaded already. These really aren't organizations.
    '''An Organization. Organizations are associated with domains. They also
    have parent/child hierarchies. Currently an organization can have at
    most 1 parent. Top-level organizations don't have a parent.
    Organizations also have members and supervisors.'''

    name = models.CharField(max_length=32, unique=True) #something's messed up with this (CZUE 6/9/2009: I didn't write this comment - what's messed up??)
    domain = models.ForeignKey(Domain)
    description = models.CharField(max_length=255, null=True, blank=True)
    organization_type = models.ManyToManyField(OrganizationType)
    # Single-parent hierarchy; null parent means top-level.
    parent = models.ForeignKey("Organization", null=True, blank=True, related_name="children")

    # membership and supervision is modeled by rapidsms reporters
    # and groups
    members = models.ForeignKey(ReporterGroup, null=True, blank=True, related_name="members")
    supervisors = models.ForeignKey(ReporterGroup, null=True, blank=True, related_name="supervisors")

    class Meta:
        verbose_name = _("Organization")

    def __unicode__(self):
        return self.name

    def get_supervisors(self):
        # ReporterProfiles of everyone in the supervisors group
        # (empty queryset when no group is attached).
        if self.supervisors is None:
            return ReporterProfile.objects.none()
        reps = self.supervisors.reporters.all()
        return ReporterProfile.objects.filter(reporter__in=reps)

    def get_members(self):
        # ReporterProfiles of everyone in the members group
        # (empty queryset when no group is attached).
        if self.members is None:
            return ReporterProfile.objects.none()
        members = self.members.reporters.all()
        return ReporterProfile.objects.filter(reporter__in=members)
# Choice tuples for ReportSchedule fields: (stored value, display label).

REPORT_CLASS = (
    ('siteadmin', 'General Site Admin'),
    ('supervisor', 'Organizational Supervisor'),
    ('member', 'Organization Member'),
    ('domain', 'Custom Domain Report'),
    ('other', 'Other Report Type'),
)

REPORT_FREQUENCY = (
    ('weekly', 'Weekly'),
    ('daily', 'Daily'),
    ('monthly', 'Monthly'),
    ('quarterly', 'Quarterly'),
)

REPORT_DELIVERY = (
    ('sms', 'SMS'),
    ('email', 'Email'),
)
class ReportSchedule(models.Model):
    """A scheduled, recurring report delivered by SMS or email."""
    name = models.CharField(max_length=64)
    description = models.CharField(max_length=255)
    report_class = models.CharField(_('Report Class'), max_length=32, choices=REPORT_CLASS)
    report_frequency = models.CharField(_('Delivery Frequency'), max_length=32, choices=REPORT_FREQUENCY)
    report_delivery = models.CharField(_('Delivery Transport/Method'), max_length=32, choices=REPORT_DELIVERY)
    recipient_user = models.ForeignKey(User, null=True, blank=True,
                                       help_text=_("If this is a General Site Admin report, enter the user you want to receive this report."))
    organization = models.ForeignKey(Organization, null=True, blank=True,
                                     help_text=_("If this is an Organizational supervisor or member report, indicate the exact organization you want to report on."))
    report_function = models.CharField(max_length=255, null=True, blank=True,
                                       help_text=_("The view or other python function you want run for this report. This is necessary only for General Site admin and Other report types."))
    active = models.BooleanField(default=True)

    @property
    def domain(self):
        '''Get the domain, trying first the organization, then the user. If neither
        are set, will return nothing'''
        if self.organization:
            return self.organization.domain
        elif self.recipient_user:
            # NOTE(review): recipient_user is django.contrib.auth User, which
            # has no 'domain' attribute in stock Django — presumably relies on
            # an extension/monkey-patch; confirm before use.
            return self.recipient_user.domain
        return None

    def __unicode__(self):
        return unicode(self.name + " - " + self.report_frequency)
class BlacklistedUser(models.Model):
    '''Model for a blacklisted user. Blacklisted users should be excluded from
    most views of the data, including, but not limited to, charts, reports,
    submission logs(?), data/tabular views, etc.'''
    # this is a temporary solution until we get real reporters for everyone
    # we care about.

    # A username may be blacklisted in several domains at once.
    domains = models.ManyToManyField(Domain, related_name="blacklist")

    # could use reporters here, but this will keep things simple, which is
    # desirable for a short-term solution
    username = models.CharField(max_length=64)

    # allow temporary enabling/disabling of blacklist at a global level.
    active = models.BooleanField(default=True)

    @classmethod
    def for_domain(cls, domain):
        """Get a flat blacklist of names for a domain, useful for doing
        'in' queries or simple loops."""
        # NOTE: using this as a blacklist check implies a list lookup for each
        # user which could eventually get inefficient. We could make this a
        # hashset if desired to make this O(1)
        return domain.blacklist.filter(active=True)\
            .values_list('username', flat=True)

    def __unicode__(self):
        return "%s in %s" %\
            (self.username,
             ",".join([domain.name for domain in self.domains.all()]))
# register our registration method, like a signal, in the models file
# to make sure this always gets bootstrapped.
# TODO: it's unclear what app reg should belong to, for now stick it in
# the blanket "hq"
# Routes incoming XForms with the registration namespace to create_phone_user.
xmlrouter.register(REGISTRATION_XMLNS, create_phone_user)
| {
"content_hash": "89d8fa85b29ce67e5656d9c88562c411",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 190,
"avg_line_length": 44.251063829787235,
"alnum_prop": 0.6372728146937205,
"repo_name": "commtrack/temp-aquatest",
"id": "86435f282edbc6ad59c4b4d66db50696720727ee",
"size": "10399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/hq/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "742874"
},
{
"name": "PHP",
"bytes": "2863"
},
{
"name": "Python",
"bytes": "3707591"
},
{
"name": "Shell",
"bytes": "490"
}
],
"symlink_target": ""
} |
"""
Generate header file with macros defining MicroPython version info.
This script works with Python 2.6, 2.7, 3.3 and 3.4.
"""
from __future__ import print_function
import sys
import os
import datetime
import subprocess
def get_version_info_from_git():
    """Return ``(git_tag, git_hash)`` derived from the working tree.

    Returns ``None`` when git is unavailable, the directory is not a
    repository, or the running Python predates ``subprocess.check_output``
    (Python 2.6).  The hash gets a ``-dirty`` suffix when the tree has
    staged or unstaged modifications.
    """
    # Python 2.6 lacks check_output/check_call; bail out early in that case.
    if not (hasattr(subprocess, "check_output") and hasattr(subprocess, "check_call")):
        return None

    def run_git(cmd):
        # Capture stdout (stderr folded in) as text and strip the newline.
        return subprocess.check_output(
            cmd, stderr=subprocess.STDOUT, universal_newlines=True
        ).strip()

    # "git describe" fails when no tag is reachable from HEAD.
    try:
        tag = run_git(["git", "describe", "--tag", "--dirty", "--always"])
    except subprocess.CalledProcessError as exc:
        if exc.returncode == 128:
            # exit code 128 means this is not a git repository
            return None
        tag = ""
    except OSError:
        return None

    try:
        commit = run_git(["git", "rev-parse", "--short", "HEAD"])
    except subprocess.CalledProcessError:
        commit = "unknown"
    except OSError:
        return None

    try:
        # Any unstaged modifications?
        subprocess.check_call(
            ["git", "diff", "--no-ext-diff", "--quiet", "--exit-code"],
            stderr=subprocess.STDOUT,
        )
        # Any staged modifications?
        subprocess.check_call(
            ["git", "diff-index", "--cached", "--quiet", "HEAD", "--"],
            stderr=subprocess.STDOUT,
        )
    except subprocess.CalledProcessError:
        commit += "-dirty"
    except OSError:
        return None

    return tag, commit
def get_version_info_from_docs_conf():
    """Fallback version source: scrape docs/conf.py for the release string.

    Returns ``("v<version>", "<no hash>")`` from the first matching line,
    or ``None`` if no such line exists.
    """
    conf_path = os.path.join(os.path.dirname(sys.argv[0]), "..", "docs", "conf.py")
    marker = "version = release = '"
    with open(conf_path) as conf_file:
        for line in conf_file:
            if line.startswith(marker):
                # Line shape: version = release = '1.2.3'
                version = line.strip().split(" = ")[2].strip("'")
                return "v" + version, "<no hash>"
    return None
def make_version_header(filename):
    """Write *filename* with MICROPY_GIT_TAG/GIT_HASH/BUILD_DATE macros.

    Version info comes from git, falling back to docs/conf.py; if neither
    source is available, placeholder values are used instead of crashing
    (the original raised TypeError unpacking None in that case).  The file
    is rewritten only when its content changes, so dependents are not
    rebuilt needlessly.
    """
    # Get version info using git, with fallback to docs/conf.py
    info = get_version_info_from_git()
    if info is None:
        info = get_version_info_from_docs_conf()
    if info is None:
        # No git and no docs/conf.py (e.g. a bare source export).
        info = ("<no tag>", "<no hash>")
    git_tag, git_hash = info

    build_date = datetime.date.today()
    if "SOURCE_DATE_EPOCH" in os.environ:
        # Reproducible-builds convention: pin the build date from the env.
        build_date = datetime.datetime.utcfromtimestamp(
            int(os.environ["SOURCE_DATE_EPOCH"])
        ).date()

    # Generate the file with the git and version info
    file_data = """\
// This file was generated by py/makeversionhdr.py
#define MICROPY_GIT_TAG "%s"
#define MICROPY_GIT_HASH "%s"
#define MICROPY_BUILD_DATE "%s"
""" % (
        git_tag,
        git_hash,
        build_date.strftime("%Y-%m-%d"),
    )

    # Check if the file contents changed from last time
    write_file = True
    if os.path.isfile(filename):
        with open(filename, "r") as f:
            existing_data = f.read()
        if existing_data == file_data:
            write_file = False

    # Only write the file if we need to
    if write_file:
        print("GEN %s" % filename)
        with open(filename, "w") as f:
            f.write(file_data)
if __name__ == "__main__":
    # CLI usage: makeversionhdr.py <output-header-file>
    make_version_header(sys.argv[1])
| {
"content_hash": "c2e9e12f32cf0845b2b0f18649041395",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 96,
"avg_line_length": 28.905982905982906,
"alnum_prop": 0.5821998817267889,
"repo_name": "pfalcon/micropython",
"id": "95fa59ad6635db4da53f5666c7a0e548d080ece3",
"size": "3382",
"binary": false,
"copies": "1",
"ref": "refs/heads/pfalcon",
"path": "py/makeversionhdr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10582"
},
{
"name": "C",
"bytes": "14095787"
},
{
"name": "C++",
"bytes": "588783"
},
{
"name": "CMake",
"bytes": "876"
},
{
"name": "JavaScript",
"bytes": "5792"
},
{
"name": "Makefile",
"bytes": "153731"
},
{
"name": "Objective-C",
"bytes": "7411"
},
{
"name": "Python",
"bytes": "1060906"
},
{
"name": "Shell",
"bytes": "16846"
}
],
"symlink_target": ""
} |
from functools import wraps

from django.conf import settings
from django.utils.translation import ugettext as _

from .exceptions import AppAlreadyInstalledException
from .utils import resolve_appinstance
# Lazily-translated permission-denied messages used by the decorators below.
PERMISSION_MSG_DELETE = _("You are not permitted to delete this Instance")
PERMISSION_MSG_GENERIC = _("You do not have permissions for this Instance.")
PERMISSION_MSG_MODIFY = _("You are not permitted to modify this Instance")
PERMISSION_MSG_METADATA = _(
    "You are not permitted to modify this Instance's metadata")
PERMISSION_MSG_VIEW = _("You are not permitted to view this Instance")
def can_change_app_instance(function):
    """View decorator requiring 'base.change_resourcebase' on the app instance.

    The wrapped view must receive an ``instance_id`` kwarg;
    ``resolve_appinstance`` performs the lookup and permission check before
    the view runs.  ``functools.wraps`` preserves the view's metadata
    (replacing the commented-out manual ``__doc__``/``__name__`` copies in
    the original).
    """
    @wraps(function)
    def wrap(request, *args, **kwargs):
        instance_id = kwargs.get('instance_id', None)
        assert instance_id
        resolve_appinstance(request, instance_id, 'base.change_resourcebase',
                            PERMISSION_MSG_MODIFY)
        return function(request, *args, **kwargs)
    return wrap
def can_view_app_instance(function):
    """View decorator requiring 'base.view_resourcebase' on the app instance.

    The wrapped view must receive an ``instance_id`` kwarg;
    ``resolve_appinstance`` performs the lookup and permission check before
    the view runs.  ``functools.wraps`` preserves the view's metadata
    (replacing the commented-out manual ``__doc__``/``__name__`` copies in
    the original).
    """
    @wraps(function)
    def wrap(request, *args, **kwargs):
        instance_id = kwargs.get('instance_id', None)
        assert instance_id
        resolve_appinstance(request, instance_id, 'base.view_resourcebase',
                            PERMISSION_MSG_VIEW)
        return function(request, *args, **kwargs)
    return wrap
def restart_enabled(func):
    """Decorator that turns ``func`` into a no-op under test settings.

    When ``settings.CARTOVIEW_TEST`` is truthy the wrapped call is skipped
    and ``None`` is returned; otherwise ``func`` runs normally.
    ``functools.wraps`` keeps the wrapped function's metadata intact.
    """
    @wraps(func)
    def wrap(*args, **kwargs):
        if not getattr(settings, "CARTOVIEW_TEST", False):
            return func(*args, **kwargs)
        # Test mode: deliberately skip the action (implicit None).
    return wrap
def rollback_on_failure(func):
    """Method decorator: roll back the instance on failure, then re-raise.

    On any exception other than ``AppAlreadyInstalledException``, calls the
    instance's ``_rollback()`` (when present) before re-raising.  Uses a
    bare ``raise`` so the original traceback is preserved, and
    ``functools.wraps`` so the wrapped method keeps its metadata.
    """
    @wraps(func)
    def wrap(*args, **kwargs):
        this = args[0]
        try:
            return func(*args, **kwargs)
        except BaseException as e:
            # Already-installed errors are expected and need no rollback.
            if not isinstance(e, AppAlreadyInstalledException):
                if hasattr(this, '_rollback'):
                    this._rollback()
            raise
    return wrap
| {
"content_hash": "85c296c3e3c998494863f84c4c976121",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 77,
"avg_line_length": 32.95,
"alnum_prop": 0.6322711178553364,
"repo_name": "cartologic/cartoview",
"id": "913d5541df5d5365b6c3056c408f77bd76a7ff36",
"size": "1977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cartoview/app_manager/decorators.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "12039"
},
{
"name": "Dockerfile",
"bytes": "354"
},
{
"name": "HTML",
"bytes": "59655"
},
{
"name": "JavaScript",
"bytes": "21735"
},
{
"name": "Makefile",
"bytes": "1667"
},
{
"name": "Python",
"bytes": "200570"
},
{
"name": "Shell",
"bytes": "4100"
}
],
"symlink_target": ""
} |
from enum import IntEnum
from app import db
"""
Describes the authentication level of a user as a series of constants.
IntEnum has been chosen as this provides a nice way to use < and > for permission checking.
"""
class AuthLevel(IntEnum):
    """Authentication level of a user, ordered so ``<``/``>`` compare privilege.

    Note: the original wrote ``UNKNOWN = 0,`` and ``USER = 1,`` — the trailing
    commas made each member value a 1-tuple, which Enum only coerced back to
    an int through the IntEnum mixin constructor.  The explicit ints below
    are equivalent but intentional.
    """
    UNKNOWN = 0
    USER = 1
    ADMIN = 2
"""
A class that represents the schema of a User as a MongoEngine document.
"""
class User(db.Document):
    """MongoEngine document describing an application user."""
    # Personal details
    firstName = db.StringField(min_length = 2, max_length = 20, required = True)
    lastName = db.StringField(min_length = 2, max_length = 30, required = True)

    # Login credentials
    username = db.StringField(min_length = 4, max_length = 20, required = True)
    # NOTE(review): a plain StringField — presumably a password *hash* is
    # stored here; confirm callers never persist plaintext.
    password = db.StringField(required = True)

    # Email
    email = db.EmailField(required = True)

    # Authentication-level (raw int; see AuthLevel for the known values)
    authLevel = db.IntField(required = True)

    # Username and email are each enforced unique at the index level.
    meta = {
        'allow_inheritance' : True,
        'indexes' : [
            {
                'fields' : ['username'],
                'unique' : True
            },
            {
                'fields' : ['email'],
                'unique' : True
            }
        ]
    }
| {
"content_hash": "ed5268ba34bf49e5ef4bbee3328d9363",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 91,
"avg_line_length": 26.068181818181817,
"alnum_prop": 0.5832606800348736,
"repo_name": "Zillolo/mana-vault",
"id": "ec59da7e80a205f6d4c2c675dafcb84c14f8f17e",
"size": "1147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/mod_auth/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "475"
},
{
"name": "HTML",
"bytes": "7376"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "16235"
}
],
"symlink_target": ""
} |
# Sentinel bounds used by Interval/IntervalSet arithmetic (e.g. complement()).
INFINITY = float('inf')
NEGATIVE_INFINITY = -INFINITY
class IntervalSet:
    """An ordered collection of disjoint Intervals supporting set algebra.

    Construction merges overlapping input intervals unless ``disjoint=True``.
    Intervals are assumed sorted by start throughout.  Note this is
    Python 2 code: truthiness relies on ``__nonzero__``.
    """
    __slots__ = ('intervals', 'size')

    def __init__(self, intervals, disjoint=False):
        self.intervals = intervals
        if not disjoint:
            self.intervals = union_overlapping(self.intervals)
        # Total covered length; may be inf when unbounded intervals occur.
        self.size = sum(i.size for i in self.intervals)

    def __repr__(self):
        return repr(self.intervals)

    def __iter__(self):
        return iter(self.intervals)

    def __nonzero__(self):
        # Python 2 truthiness hook: zero total coverage is falsy.
        return self.size != 0

    def __sub__(self, other):
        # Set difference: A - B == A ∩ complement(B).
        return self.intersect( other.complement() )

    def complement(self):
        # Walk the gaps between intervals, sweeping a cursor from -inf to +inf.
        complementary = []
        cursor = NEGATIVE_INFINITY
        for interval in self.intervals:
            if cursor < interval.start:
                complementary.append( Interval(cursor, interval.start) )
            cursor = interval.end
        if cursor < INFINITY:
            complementary.append( Interval(cursor, INFINITY) )
        return IntervalSet(complementary, disjoint=True)

    def intersect(self, other): #XXX The last major bottleneck. Factorial-time hell.
        # Then again, this function is entirely unused...
        # Pairwise-intersects every interval of self with every interval of
        # other: quadratic in the number of intervals.
        if (not self) or (not other):
            return IntervalSet([])
        #earliest = max(self.intervals[0].start, other.intervals[0].start)
        #latest = min(self.intervals[-1].end, other.intervals[-1].end)
        #mine = [i for i in self.intervals if i.start >= earliest and i.end <= latest]
        #theirs = [i for i in other.intervals if i.start >= earliest and i.end <= latest]
        intersections = [x for x in (i.intersect(j)
                                     for i in self.intervals
                                     for j in other.intervals)
                         if x]
        return IntervalSet(intersections, disjoint=True)

    def intersect_interval(self, interval):
        # Intersection with a single Interval; drops empty (falsy) overlaps.
        intersections = [x for x in (i.intersect(interval)
                                     for i in self.intervals)
                         if x]
        return IntervalSet(intersections, disjoint=True)

    def union(self, other):
        # Concatenate and re-sort; the constructor merges any overlaps.
        return IntervalSet( sorted(self.intervals + other.intervals) )
class Interval:
    """A closed numeric interval [start, end] with end >= start.

    Instances are hashable, compare equal by their (start, end) tuple and
    order by their start point.  ``size`` may be ``inf`` for unbounded
    intervals, which is why ``len()`` is deliberately unsupported.

    Fix: the original defined only the Python 2 hooks ``__cmp__`` and
    ``__nonzero__``, so sorting and truthiness silently broke on Python 3;
    equivalent ``__lt__`` and ``__bool__`` are added (the py2 hooks are
    kept, so Python 2 behavior is unchanged).
    """
    __slots__ = ('start', 'end', 'tuple', 'size')

    def __init__(self, start, end):
        if end - start < 0:
            raise ValueError("Invalid interval start=%s end=%s" % (start, end))
        self.start = start
        self.end = end
        self.tuple = (start, end)
        self.size = self.end - self.start

    def __eq__(self, other):
        return self.tuple == other.tuple

    def __hash__(self):
        return hash(self.tuple)

    def __cmp__(self, other):
        # Python 2 ordering hook (ignored by Python 3).
        return cmp(self.start, other.start)

    def __lt__(self, other):
        # Python 3 ordering hook; consistent with __cmp__ above.
        return self.start < other.start

    def __len__(self):
        raise TypeError("len() doesn't support infinite values, use the 'size' attribute instead")

    def __nonzero__(self):
        return self.size != 0

    # Python 3 spells the truthiness hook __bool__; alias it so zero-size
    # intervals are falsy on both major versions.
    __bool__ = __nonzero__

    def __repr__(self):
        return '<Interval: %s>' % str(self.tuple)

    def intersect(self, other):
        """Return the overlapping Interval, or None when there is no overlap."""
        start = max(self.start, other.start)
        end = min(self.end, other.end)
        if end > start:
            return Interval(start, end)

    def overlaps(self, other):
        """True when the two intervals touch or overlap."""
        earlier = self if self.start <= other.start else other
        later = self if earlier is other else other
        return earlier.end >= later.start

    def union(self, other):
        """Smallest Interval covering both; raises TypeError when disjoint."""
        if not self.overlaps(other):
            raise TypeError("Union of disjoint intervals is not an interval")
        start = min(self.start, other.start)
        end = max(self.end, other.end)
        return Interval(start, end)
def union_overlapping(intervals):
    """Union any overlapping intervals in the given set.

    Expects *intervals* sorted by start; returns a new list of disjoint
    Intervals covering the same points.
    """
    merged = []
    for current in intervals:
        if merged and current.start <= merged[-1].end:
            # Overlaps (or touches) the previous interval: extend it if
            # the current one reaches further right.
            last = merged[-1]
            if current.end > last.end:
                merged[-1] = Interval(last.start, current.end)
        else:
            # Disjoint from everything collected so far.
            merged.append(current)
    return merged
| {
"content_hash": "59281c45d048bfd1a9ec7d8a8513aac1",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 94,
"avg_line_length": 28.643939393939394,
"alnum_prop": 0.639513356254959,
"repo_name": "slackhappy/graphite-web",
"id": "1413f83b90b16eecc4baa9b9d6cc97f773558b5a",
"size": "3781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/graphite/intervals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "4716046"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "589294"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "3978"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from pushit.constants import DEFAULT_ALIAS
from pushit.logger import Logger
from pushit.utils import loading
# Set up a logger for the app
logger = Logger.get_logger(name="pushit")

# Fail fast at import time if the required settings are absent or do not
# include the default alias — misconfiguration surfaces immediately rather
# than at first connection use.
if not hasattr(settings, "PUSHIT_CONNECTIONS"):
    raise ImproperlyConfigured("The 'PUSHIT_CONNECTIONS' setting is required.")

if DEFAULT_ALIAS not in settings.PUSHIT_CONNECTIONS:
    raise ImproperlyConfigured(
        "The default alias '%s' must be included in the PUSHIT_CONNECTIONS setting." % DEFAULT_ALIAS)

# Load the connections
connections = loading.ConnectionHandler(settings.PUSHIT_CONNECTIONS)
| {
"content_hash": "640fae3d28e490a678f116db39ce28f5",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 101,
"avg_line_length": 33.68181818181818,
"alnum_prop": 0.7894736842105263,
"repo_name": "rhblind/django-pushit",
"id": "87de25d7ead996beecd320521cdc12787dbe2f5d",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pushit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6790"
},
{
"name": "Python",
"bytes": "46138"
},
{
"name": "Shell",
"bytes": "6715"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.http import HttpResponse
from simplewebmentions.helpers import is_valid_target
def dummy_webmention(request, *args, **kwargs):
    """Minimal webmention endpoint used by the test app.

    Bug fixes relative to the original:
    - ``target`` was referenced without ever being defined, raising a
      NameError on every request; it is now read from the request
      parameters (webmentions conventionally POST a 'target' URL —
      NOTE(review): confirm the parameter name against the caller).
    - when the target was not valid the view fell through and returned
      ``None``, which Django rejects; it now returns a 400 response.
    """
    target = request.POST.get('target', request.GET.get('target'))
    match = is_valid_target(target, request)
    if match:
        return HttpResponse('webmention allowed', status=200)
    return HttpResponse('invalid webmention target', status=400)
| {
"content_hash": "353a7aee3f7d38c32cd69697b6753a92",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 61,
"avg_line_length": 31.4,
"alnum_prop": 0.7197452229299363,
"repo_name": "emilbjorklund/django-simplewebmentions",
"id": "8f783bc70dc3b2d3310b7689daaa356dc7c77d48",
"size": "314",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/testapp/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20466"
}
],
"symlink_target": ""
} |
import argparse
import sys
from runroo import clusterSSH
from runroo import qsubSSH
def formatCommandClusterSSH(c_dict, l):
    """Append the clusterSSH parameters (n_ct, node, nodeNM) to l.

    c_dict maps option names (parsed from the command file) to values;
    l is the accumulator list, already seeded with the action name.
    Returns l with the three values appended in that fixed order; exits
    the process if any required key is missing.

    Bug fix: the original exited only on a missing 'n_ct'; for a missing
    'node' or 'nodeNM' it printed an error but kept going, returning a
    short list that breaks positional indexing downstream. All three keys
    are now treated as required.
    """
    if 'n_ct' in c_dict:
        l.append(c_dict['n_ct'])
    else:
        print('Error, please check the formatting in the Commandline, command \'n_ct\' not found')
        sys.exit()
    if 'node' in c_dict:
        l.append(c_dict['node'])
    else:
        print('Error, please check the formatting in the Commandline , command \'node\' not found')
        sys.exit()
    if 'nodeNM' in c_dict:
        l.append(c_dict['nodeNM'])
    else:
        print('Error, please check the formatting in the Commandline , command \'nodeNM\' not found')
        sys.exit()
    return l
def formatCommandQsubSSH(c_dict, l):
    """Append the qsubSSH parameters (n_ct, allocationName, wallTime,
    queueName) to l, in that fixed order.

    c_dict maps option names (parsed from the command file) to values;
    l is the accumulator list, already seeded with the action name.
    Exits the process if any required key is missing.

    Bug fix: the original exited only on a missing 'n_ct'; a missing
    'allocationName', 'wallTime' or 'queueName' printed an error but
    continued, producing a short list that breaks positional indexing
    downstream. All four keys are now treated as required.
    """
    if 'n_ct' in c_dict:
        l.append(c_dict['n_ct'])
    else:
        print('Error, please check the formatting in the Commandline, command \'n_ct\' not found')
        sys.exit()
    if 'allocationName' in c_dict:
        l.append(c_dict['allocationName'])
    else:
        print('Error, please check the formatting in the Commandline , command \'allocationName\' not found')
        sys.exit()
    if 'wallTime' in c_dict:
        l.append(c_dict['wallTime'])
    else:
        print('Error, please check the formatting in the Commandline , command \'wallTime\' not found')
        sys.exit()
    if 'queueName' in c_dict:
        l.append(c_dict['queueName'])
    else:
        print('Error, please check the formatting in the Commandline , command \'queueName\' not found')
        sys.exit()
    return l
def formatCommandSingleNodeSSH(c_dict, l):
    """Append the single-node-SSH parameter (n_ct) from c_dict to l.

    Returns l with the value appended; prints an error and exits the
    process when 'n_ct' is absent.
    """
    try:
        l.append(c_dict['n_ct'])
    except KeyError:
        print('Error, please check the formatting in the Commandline, command \'n_ct\' not found')
        sys.exit()
    return l
def formatCommandClusterLSF(c_dict, l):
    """Append the clusterLSF parameters to l, in the fixed order
    n_ct, queueName, jobName, projectName, wallTime.

    All five keys are required; a missing key prints an error and exits
    the process. Note: wallTime for LSF is formatted as NN, not NN:NN:NN.
    """
    required = ('n_ct', 'queueName', 'jobName', 'projectName', 'wallTime')
    for key in required:
        if key not in c_dict:
            print('Error, please check the formatting in the Commandline, command \'%s\' not found' % key)
            sys.exit()
        l.append(c_dict[key])
    return l
def parseSimpleCommandList(s_list):
    """Return s_list with comment lines (starting with '#') filtered out.

    Bug fixes: the original `continue`d on comment lines without advancing
    its manual index, looping forever on any comment line, and it never
    returned the accumulated list. Blank lines no longer raise IndexError.
    """
    commandsList = []
    for s_list_row in s_list:
        # Skip blanks and '#' comment lines.
        if not s_list_row or s_list_row[0] == '#':
            continue
        commandsList.append(s_list_row)
    return commandsList
def parseComplexCommandList(c_list):
    """Parse command rows; single-token rows stay as one-element lists,
    comma-separated rows become {key: value} dicts.

    Bug fixes: the original `while` loop never advanced its index
    (infinite loop) and had no return. The multi-item branch stopped after
    creating an empty dict (apparently an unfinished stub); it now parses
    the row's 'key=value' items so those rows are not silently dropped.
    NOTE(review): the completed branch is a best-effort reconstruction
    mirroring parseStartCommandFile's key=value handling — confirm against
    intended callers.
    """
    commandsList = []
    for c_list_row in c_list:
        c_list_row_items = c_list_row.split(',')
        if len(c_list_row_items) == 1:
            commandsList.append(c_list_row_items)
        else:
            c_list_row_dict = dict()
            for item in c_list_row_items:
                key, _, value = item.partition('=')
                c_list_row_dict[key] = value
            commandsList.append(c_list_row_dict)
    return commandsList
def inputStartCommandFile(f):
    """Read the command file at path f and return its lines with trailing
    CR/LF characters stripped."""
    with open(f, 'r') as cF:
        return [line.rstrip('\r\n') for line in cF]
def formatDescription():
    """Print command-file format help to stdout.

    Bug fix: four of the print literals were missing their closing quote
    (a SyntaxError that prevented the whole module from importing); the
    strings are now terminated. Message text is otherwise unchanged.
    """
    print('Options for the command file:')
    print('1) No command file: create a text document with the folowing\n###\nNONE ')
    print('2) Typical commandFile:\n\n###\ninputFile=\n')
    print('3) ###\ncommandFormat= # can be one of: \'clusterSSH, qsubSSH, clusterLSF, single-node-SSH\'\n')
    print('### \n# clusterSSH:')
    print('node=,n_ct=,nodeNM=\'\'')
    print('### \n# qsubSSH:')
    print('n_ct=,allocationName=,wallTime=,queueName=')
    print('###\n clusterLSF:')
    print('n_ct=,queueName=,jobName=,projectName=,wallTime=')
    print('###\n single-node-SSH:')
    print('n_ct=')
def parseStartCommandFile(l):
    """Parse command-file lines and extract the action plus its parameters.

    Expected layout: '#' lines are comments; the second line (l[1]) is
    either the sentinel 'NONE' (no command file — fall back to the command
    line) or 'commandFormat=<action>'; a later non-comment line holds the
    comma-separated 'key=value' options for that action.

    Returns (0, []) for the NONE sentinel, otherwise
    (1, [action, param1, ...]) as built by the matching formatCommand*
    helper. Exits the process on malformed input.

    Bug fixes: `iRow_split = iRow_split(',')` attempted to *call* the list
    left over from the previous iteration (TypeError) instead of splitting
    the current row; the Python-2-only `xrange` was replaced with `range`.
    """
    lRange = len(l)
    l_1 = l[1]
    parsedCommandList = []  # NOTE(review): kept from the original but unused
    l1_split = l_1.split('=')
    try:
        tmpVal = l1_split[1]
    except IndexError:
        # No '=' on line 2: either the NONE sentinel or a malformed file.
        if l1_split[0] == 'NONE':
            return (0, [])
        else:
            print('Error, check formatting in commandsFile')
            for i in l:
                print(i)
            sys.exit()
    cvalue = ""
    cvalue_list = []
    cvalue_returnList = []
    rowCt = 0
    for i in range(0, lRange):
        iRow = l[i]
        if iRow[0] == '#':
            continue
        else:  # 'clusterSSH, qsubSSH, clusterLSF, single-node-SSH'
            if rowCt == 0:
                # First non-comment line: the action name.
                iRow_split = iRow.split('=')
                if iRow_split[1] == 'clusterSSH':
                    cvalue = iRow_split[1]
                elif iRow_split[1] == 'qsubSSH':
                    cvalue = iRow_split[1]
                elif iRow_split[1] == 'clusterLSF':
                    cvalue = iRow_split[1]
                elif iRow_split[1] == 'single-node-SSH':
                    cvalue = iRow_split[1]
                else:
                    print('Error, please check command line of commands File')
                    sys.exit()
                rowCt += 2
            elif rowCt == 2:
                # Second non-comment line: the comma-separated options.
                cvalue_tmp = dict()
                # Bug fix: split the current row (was calling the previous
                # iteration's list object).
                iRow_split = iRow.split(',')
                cvalue_list.append(cvalue)
                for v in iRow_split:
                    v_tmp = v.split('=')
                    cvalue_tmp[v_tmp[0]] = v_tmp[1]
                if cvalue == 'clusterSSH':  # n_ct, node, nodeNM
                    cvalue_returnList = formatCommandClusterSSH(cvalue_tmp, cvalue_list)
                elif cvalue == 'qsubSSH':  # n_ct, allocationName, wallTime, queueName
                    cvalue_returnList = formatCommandQsubSSH(cvalue_tmp, cvalue_list)
                elif cvalue == 'clusterLSF':  # n_ct, queueName, jobName, projectName, wallTime
                    cvalue_returnList = formatCommandClusterLSF(cvalue_tmp, cvalue_list)
                elif cvalue == 'single-node-SSH':  # n_ct
                    cvalue_returnList = formatCommandSingleNodeSSH(cvalue_tmp, cvalue_list)
                else:
                    print('Error, action command in command file not recognized.')
                    sys.exit()
                rowCt += 2
            else:
                continue
    return (1, cvalue_returnList)
def main():
    """Command-line entry point for Root (Remote Organizational and
    Operational Tool).

    Parses arguments, then dispatches on --action. Only 'start' is
    functional; 'check', 'stop' and 'restart' are explicit stubs that
    exit right after printing the requested action.

    Bug fixes relative to the original:
    - comment-only suites (a SyntaxError) now hold explicit ``pass``
      placeholders with TODO notes;
    - ``args.commandFile`` corrected to ``args.commandfile`` everywhere
      (argparse stores ``--commandfile`` in lowercase; the mixed-case
      spelling raised AttributeError);
    - ``checkRes == 1`` corrected to ``checkRes[0] == 1``
      (parseStartCommandFile returns a (status, commands) tuple);
    - ``args4Commands = checkRes[0]`` corrected to ``checkRes[1]`` — the
      command list is the tuple's second element.
    """
    parser = argparse.ArgumentParser(description='Remote Organizational and Operational Tool: Root')
    parser.add_argument('-a', '--action', choices=['check', 'start', 'stop', 'restart'], help='check monitors a run in progress, start begins a new run, stop halts a run, restart restarts a previously stopped run')
    parser.add_argument('-i', '--inputFile', help='input file, its use is dependent on the action. Ignored for \'check\' and \'stop\' actions.')
    parser.add_argument('-f', '--commandfile', help='file with formatted commands for the desired action. Note that this is REQUIRED, even if commandline arguments will be provided.')
    parser.add_argument('-c', '--commandline', help='commandline arguments added directly to the program, not recommended.')
    parser.add_argument('-s', '--show', help='show format description for command file')
    args = parser.parse_args()
    if args.show:
        formatDescription()
        sys.exit()
    if args.action == 'check':
        # code stub, implementation incomplete (exits before the code below)
        print(args.action)
        sys.exit()
        if not args.commandfile:
            print('No command file found, hope you know what you\'re doing! Attempting to monitor run with the provided parameters')
        else:
            print('Checking command file before proceeding.')
            cFile = inputStartCommandFile(args.commandfile)
            checkRes = parseStartCommandFile(cFile)
            if checkRes[0] == 1:
                pass  # TODO: monitor the run using the parsed commands
            else:
                pass  # TODO: proceed with no commandsFile
    elif args.action == 'stop':
        # code stub, implementation incomplete (exits before the code below)
        print(args.action)
        sys.exit()
        if not args.commandfile:
            print('No command file found, hope you know what you\'re doing! Attempting to halt run with the provided parameters')
        else:
            print('Checking command file before proceeding.')
            cFile = inputStartCommandFile(args.commandfile)
            checkRes = parseStartCommandFile(cFile)
            if checkRes[0] == 1:
                pass  # TODO: halt the run using the parsed commands
            else:
                pass  # TODO: proceed with no commandsFile
    elif args.action == 'restart':
        # code stub, implementation incomplete (exits before the code below)
        print(args.action)
        sys.exit()
        if not args.commandfile:
            print('No command file has been found, and a command file is required for the restart action. If you are ABSOLUTELY sure that you do not want to use a command file, create a text file with ####\nNONE as the command file.')
            sys.exit()
        else:
            print('Using command file ')
            print(args.commandfile)
            cFile = inputStartCommandFile(args.commandfile)
            checkRes = parseStartCommandFile(cFile)
            if not args.inputFile:
                print('No input file found, please check settings.')
                sys.exit()
            else:
                print('Using input file ')
                print(args.inputFile)
            if checkRes[0] == 1:
                pass  # TODO: restart the run using the parsed commands
            elif args.commandline:
                pass  # TODO: parse commandline arguments and restart
            else:
                print('Sorry, the command file was not read, and commands were not readable via commandline. Please chack the formatting and retry.\n\nNote that a command file will always be checked first, and to force commandline use you must add the line\n\n ###\nNONE \n\n to a command file')
                sys.exit()
    elif args.action == 'start':
        if not args.commandfile:
            print('No command file has been found, and a command file is required for the start action. If you are ABSOLUTELY sure that you do not want to use a command file, create a text file with ####\nNONE as the command file.')
            sys.exit()
        else:
            print('Using command file ')
            print(args.commandfile)
            print('for start action')
            cFile = inputStartCommandFile(args.commandfile)
            checkRes = parseStartCommandFile(cFile)
            if not args.inputFile:
                print('No input file found, please check settings.')
                sys.exit()
            else:
                print('Using input file ')
                print(args.inputFile)
                print('for start action')
            if checkRes[0] == 1:
                # Dispatch on the action name recorded at position 0 of the
                # parsed command list.
                args4Commands = checkRes[1]
                if args4Commands[0] == 'clusterSSH':
                    clusterSSH(args.inputFile, args4Commands[1], args4Commands[2], args4Commands[3])
                elif args4Commands[0] == 'qsubSSH':
                    qsubSSH(args.inputFile, args4Commands[1], args4Commands[2], args4Commands[3], args4Commands[4])
                elif args4Commands[0] == 'clusterLSF':
                    print('Not implemented yet')
                    sys.exit()
                    clusterLSF(args.inputFile, args4Commands[1], args4Commands[2], args4Commands[3], args4Commands[4], args4Commands[5])
                elif args4Commands[0] == 'single-node-SSH':
                    print('Not implemented yet')
                    sys.exit()
                    singleNodeSSH(args.inputFile, args4Commands[1])
            elif args.commandline:
                pass  # TODO: parse arguments, determine action type, and start action
            else:
                print('Sorry, the command file was not read, and commands were not readable via commandline. Please chack the formatting and retry.\n\nNote that a command file will always be checked first, and to force commandline use you must add the line\n\n ###\nNONE \n\n to a command file')
                sys.exit()
    else:
        print('error, unrecognized input!')
        sys.exit()
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "da70652fd1fb44817ae51c863630b319",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 292,
"avg_line_length": 39.523809523809526,
"alnum_prop": 0.5742168674698795,
"repo_name": "disulfidebond/ROO",
"id": "0af76c8d6e5391bb99bacdb37b1b04355cb4ad60",
"size": "12469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runroo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20618"
},
{
"name": "Shell",
"bytes": "703"
}
],
"symlink_target": ""
} |
import os
from django.http import HttpResponse
from opentelemetry import trace
from opentelemetry.instrumentation.django import DjangoInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter
# Enable instrumentation in the django library.
DjangoInstrumentor().instrument()
trace.set_tracer_provider(TracerProvider())
# Export spans in batches to Azure Monitor; the exporter is configured from
# the connection string in the environment (raises KeyError if unset).
span_processor = BatchSpanProcessor(
    AzureMonitorTraceExporter.from_connection_string(
        os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"]
    )
)
trace.get_tracer_provider().add_span_processor(span_processor)
def index(request):
    """Return a static greeting; requests hitting this view are traced by
    the Django instrumentation configured at module import time."""
    greeting = "Hello, world."
    return HttpResponse(greeting)
| {
"content_hash": "274aa5e0b5029ac7646401905700b800",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 32.125,
"alnum_prop": 0.8223086900129701,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e3879d27ad53b5adba8bf3441f0fc652823dc125",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/monitor/azure-monitor-opentelemetry-exporter/samples/traces/django/sample/example/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Package contenant la commande 'versions'."""
from primaires.interpreteur.commande.commande import Commande
from .ajouter import PrmAjouter
from .editer import PrmEditer
from .supprimer import PrmSupprimer
class CmdVersions(Commande):

    """The 'versions' command.

    Lets players browse the modification log (changelog) recorded by the
    administrators, either the N most recent entries or only the entries
    they have not seen yet.
    """

    def __init__(self):
        """Command constructor: wires the name, category, schema and help text."""
        Commande.__init__(self, "versions", "versions")
        self.nom_categorie = "info"
        # Optional numeric argument: how many recent entries to show.
        self.schema = "(<nombre>)"
        self.aide_courte = "permet de suivre les modifications"
        self.aide_longue = \
            "Cette commande renvoie les |ent|nombre|ff| dernières " \
            "modifications enregistrées par les administrateurs. Si vous " \
            "ne précisez pas de nombre, elle renvoie simplement les " \
            "modifications que vous n'avez pas encore vues."

    def ajouter_parametres(self):
        """Register the subcommands: ajouter (add), editer (edit), supprimer (delete)."""
        prm_ajouter = PrmAjouter()
        prm_editer = PrmEditer()
        prm_supprimer = PrmSupprimer()
        self.ajouter_parametre(prm_ajouter)
        self.ajouter_parametre(prm_editer)
        self.ajouter_parametre(prm_supprimer)

    def interpreter(self, personnage, dic_masques):
        """Interpret the command for *personnage*.

        With a number: show that many recent entries (error message if the
        number exceeds the log size). Without: show the entries the player
        has not yet seen.
        """
        versions = type(self).importeur.information.versions
        modifs = ""  # NOTE(review): unused — kept as-is
        if dic_masques["nombre"] is not None:
            nombre = dic_masques["nombre"].nombre
            if nombre > len(versions):
                personnage << "|err|Le nombre précisé est supérieur au " \
                        "nombre de modifications.|ff|"
            else:
                personnage << versions.afficher(nombre)
        else:
            ret = versions.afficher_dernieres_pour(personnage)
            if not ret:
                personnage << "|att|Aucune nouvelle modification pour " \
                        "l'instant.|ff|"
            else:
                personnage << ret
| {
"content_hash": "0e5b5e196bb6ff50ec11fa6de09b49ab",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 37.094339622641506,
"alnum_prop": 0.5890132248219736,
"repo_name": "stormi/tsunami",
"id": "3bdf1929c046551eb43133606b6eaf02305e3b4c",
"size": "3540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/information/commandes/versions/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import pytest
from tenable_io.api.target_groups import TargetListEditRequest
from tenable_io.api.models import TargetGroup, TargetGroupList
@pytest.mark.vcr()
def test_target_groups_create(new_target_group):
    # The fixture performs the create call; this only asserts the result type.
    assert isinstance(new_target_group, TargetGroup), u'The `create` method did not return type `TargetGroup`.'
@pytest.mark.vcr()
def test_target_groups_details(client, new_target_group):
    # Fetch details for a freshly created group and verify type and identity.
    target_group = new_target_group
    details = client.target_groups_api.details(target_group.id)
    assert isinstance(details, TargetGroup), u'The `details` method did not return type `TargetGroup`.'
    assert details.id == target_group.id, u'Expected the `details` response to match the requested target group.'
@pytest.mark.vcr()
def test_target_groups_list(client):
    """List target groups and verify the container and element types."""
    target_groups = client.target_groups_api.list()
    # Bug fix: the failure message was copy-pasted from the details test
    # ("The `details` method ... type `TargetGroup`") although this line
    # asserts on `list` returning a `TargetGroupList`.
    assert isinstance(target_groups, TargetGroupList), u'The `list` method did not return type `TargetGroupList`.'
    for group in target_groups.target_groups:
        assert isinstance(group, TargetGroup), u'Expected a list of type `TargetGroup`.'
@pytest.mark.vcr()
def test_target_groups_delete(client, new_target_group):
    # delete() is expected to return a truthy value on success.
    assert client.target_groups_api.delete(new_target_group.id), u'The target group was not deleted.'
@pytest.mark.vcr()
def test_target_groups_edit(client, new_target_group):
    # Rename a group and verify the edit is reflected in the response.
    target_group = new_target_group
    edited_name = 'test_target_group_edit'
    edited_group = client.target_groups_api.edit(TargetListEditRequest(name=edited_name), target_group.id)
    assert isinstance(edited_group, TargetGroup), u'The `edit` method did not return type `TargetGroup`.'
    assert edited_group.id == target_group.id, u'Expected the edited target group to match the requested target group.'
    assert edited_group.name == edited_name, u'Expected the name to be updated.'
| {
"content_hash": "9af24659b63ee35bacc09ca95f0d6e7b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 119,
"avg_line_length": 45.7,
"alnum_prop": 0.75054704595186,
"repo_name": "tenable/Tenable.io-SDK-for-Python",
"id": "07e2d1b8a7c46e378298b64b296fe93ed48acbf5",
"size": "1828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/api/test_target_groups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "459766"
}
],
"symlink_target": ""
} |
from lxml import html, etree
import requests
import os
import time
import bvglibrary as bvg
import scanlibrary as scan
import time
# LAN devices whose online status will be probed.
# NOTE(review): IP addresses and labels are hard-coded for one specific LAN.
devices=[scan.device('192.168.2.1','Router'),scan.device('192.168.2.118','Tablet'),scan.device('192.168.2.162','MediaLaptop'),scan.device('192.168.2.195','ChristianHandy'),scan.device('192.168.2.191','ChristianDesktop')]
devices=scan.getstatus(devices)  # probe each device's reachability
scan.writexmlpart(devices)  # persist the status fragment to XML
localtime=time.localtime(time.time())
# Fetch departure data for the current time (BVG presumably refers to the
# Berlin transport authority — confirm in bvglibrary) and publish it.
times, lines, destinations=bvg.request(localtime)
output=bvg.writexml(times,lines,destinations)
bvg.writeindex(output)
| {
"content_hash": "1242262b6a2ecbcc7aea2277ff4ceb62",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 220,
"avg_line_length": 35.625,
"alnum_prop": 0.7824561403508772,
"repo_name": "RoboWoodhouse/RoboButler",
"id": "ed757b1ea66955e0a33da7f6aeb178dba82df9a1",
"size": "594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15159"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "7947"
}
],
"symlink_target": ""
} |
import numpy as np
from ..utils import _validate_type, _check_option
from ..utils.check import int_like
def combine_adjacency(*structure):
    """Create a sparse binary adjacency/neighbors matrix.

    Parameters
    ----------
    *structure : list
        The adjacency along each dimension. Each entry can be:

        - ndarray or sparse matrix
            A square binary adjacency matrix for the given dimension.
        - int
            The number of elements along the given dimension. A lattice
            adjacency will be generated.

    Returns
    -------
    adjacency : scipy.sparse.coo_matrix, shape (n_features, n_features)
        The adjacency matrix.
    """
    from scipy import sparse
    # Normalize every dimension to an off-diagonal-only COO adjacency.
    structure = list(structure)
    for di, dim in enumerate(structure):
        name = f'structure[{di}]'
        _validate_type(dim, ('int-like', np.ndarray, sparse.spmatrix), name)
        if isinstance(dim, int_like):
            # Integer entry: build a 1-D lattice (nearest-neighbor) adjacency.
            dim = int(dim)
            # Don't add the diagonal, because we explicitly remove it later:
            # dim = sparse.eye(dim, format='coo')
            # dim += sparse.eye(dim.shape[0], k=1, format='coo')
            # dim += sparse.eye(dim.shape[0], k=-1, format='coo')
            ii, jj = np.arange(0, dim - 1), np.arange(1, dim)
            edges = np.vstack([np.hstack([ii, jj]), np.hstack([jj, ii])])
            dim = sparse.coo_matrix(
                (np.ones(edges.shape[1]), edges), (dim, dim), float)
        else:
            # Matrix entry: validate shape/values, convert to COO, and drop
            # any diagonal entries (the diagonal is re-added once at the end).
            _check_option(f'{name}.ndim', dim.ndim, [2])
            if dim.shape[0] != dim.shape[1]:
                raise ValueError(
                    f'{name} must be square, got shape {dim.shape}')
            if not isinstance(dim, sparse.coo_matrix):
                dim = sparse.coo_matrix(dim)
            else:
                dim = dim.copy()
            dim.data[dim.row == dim.col] = 0.  # remove diagonal, will add later
            dim.eliminate_zeros()
            if not (dim.data == 1).all():
                raise ValueError('All adjacency values must be 0 or 1')
        structure[di] = dim
    # list of coo
    assert all(isinstance(dim, sparse.coo_matrix) for dim in structure)
    shape = np.array([d.shape[0] for d in structure], int)
    # For each dimension: how many feature positions the *other* dimensions
    # contribute, and hence how many edge copies each dimension's edges need.
    n_others = np.array([np.prod(np.concatenate([shape[:di], shape[di + 1:]]))
                         for di in range(len(structure))], int)
    n_each = np.array([dim.data.size for dim in structure], int) * n_others
    n_off = n_each.sum()  # off-diagonal terms
    n_diag = np.prod(shape)
    # Flat vertex numbering for the full n-D grid of features.
    vertices = np.arange(n_diag).reshape(shape)
    edges = np.empty((2, n_off + n_diag), int)
    used = np.zeros(n_off, bool)
    weights = np.empty(n_off + n_diag, float)  # even though just 0/1
    offset = 0
    for di, dim in enumerate(structure):
        # Broadcast this dimension's (row, col) edge pairs across all other
        # dimensions via fancy indexing on the vertex grid.
        s_l = [slice(None)] * len(shape)
        s_r = [slice(None)] * len(shape)
        s_l[di] = dim.row
        s_r[di] = dim.col
        assert dim.row.shape == dim.col.shape == dim.data.shape
        sl = slice(offset, offset + n_each[di])
        edges[:, sl] = [vertices[tuple(s_l)].ravel(),
                        vertices[tuple(s_r)].ravel()]
        weights[sl] = np.tile(dim.data, n_others[di])
        offset += n_each[di]
        assert not used[sl].any()
        used[sl] = True
    assert used.all()
    # Handle the diagonal separately at the end to avoid duplicate entries
    edges[:, n_off:] = vertices.ravel()
    weights[n_off:] = 1.
    graph = sparse.coo_matrix((weights, edges),
                              (vertices.size, vertices.size))
    return graph
| {
"content_hash": "2dd161ece8393442be77f9262f0e3157",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 40.85057471264368,
"alnum_prop": 0.5644344400675295,
"repo_name": "kambysese/mne-python",
"id": "c81344a8aef81fe91e57f6c5573d7a946f8b2f68",
"size": "3657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/stats/_adjacency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3912"
},
{
"name": "Python",
"bytes": "5978369"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
"""Tests for `tf.data.Dataset.from_tensor_slices()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FromTensorSlicesTest(test_base.DatasetTestBase):
def testFromTensorSlices(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (
np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(
np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
for i in range(4):
results = self.evaluate(get_next())
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
results = self.evaluate(get_next())
def testFromTensorSlicesDataset(self):
dss = [dataset_ops.Dataset.range(10) for _ in range(10)]
ds = dataset_ops.Dataset.from_tensor_slices(dss)
ds = ds.flat_map(lambda x: x)
self.assertDatasetProduces(ds, expected_output=list(range(10)) * 10)
def testFromTensorSlicesDatasetInFunction(self):
dss = [dataset_ops.Dataset.range(10) for _ in range(10)]
ds = dataset_ops.Dataset.from_tensors(dss)
ds = ds.flat_map(dataset_ops.Dataset.from_tensor_slices)
ds = ds.flat_map(lambda x: x)
self.assertDatasetProduces(ds, expected_output=list(range(10)) * 10)
def testFromTensorSlicesSparse(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
self.assertEqual(
[tensor_shape.TensorShape(c.dense_shape[1:]) for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
self.assertDatasetProduces(dataset, expected_output=expected)
def testFromTensorSlicesMixed(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (np.tile(np.array([[1], [2], [3]]), 20),
np.tile(np.array([[12], [13], [14]]), 22),
np.array([37.0, 38.0, 39.0]),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual([
tensor_shape.TensorShape(c.dense_shape[1:])
if sparse_tensor.is_sparse(c) else c.shape[1:] for c in components
], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
for i in range(3):
results = self.evaluate(get_next())
for component, result_component in zip(
(list(zip(*components[:3]))[i] + expected[i]), results):
self.assertValuesEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromTensorSlicesWithDict(self):
components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["foo"])
self.assertEqual(dtypes.float32,
dataset_ops.get_legacy_output_types(dataset)["bar"])
self.assertEqual((), dataset_ops.get_legacy_output_shapes(dataset)["foo"])
self.assertEqual((1,), dataset_ops.get_legacy_output_shapes(dataset)["bar"])
for i in range(3):
results = self.evaluate(get_next())
self.assertEqual(components["foo"][i], results["foo"])
self.assertEqual(components["bar"][i], results["bar"])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromTensorSlicesRagged(self):
components = (
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]),
ragged_factory_ops.constant_value([[[3]], [[4]], [[5]]]),
)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
expected = [(ragged_factory_ops.constant_value([[0]]),
ragged_factory_ops.constant_value([[3]])),
(ragged_factory_ops.constant_value([[1]]),
ragged_factory_ops.constant_value([[4]])),
(ragged_factory_ops.constant_value([[2]]),
ragged_factory_ops.constant_value([[5]]))]
self.assertDatasetProduces(dataset, expected_output=expected)
def testFromTensorSlicesMixedRagged(self):
components = (np.tile(np.array([[1], [2], [3]]),
20), np.tile(np.array([[12], [13], [14]]),
22), np.array([37.0, 38.0, 39.0]),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])),
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3])), ragged_factory_ops.constant_value([[0]
])),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3])), ragged_factory_ops.constant_value([[1]
])),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3])), ragged_factory_ops.constant_value([[2]
])),
]
for i in range(3):
results = self.evaluate(get_next())
for component, result_component in zip(
(list(zip(*components[:3]))[i] + expected[i]), results):
self.assertValuesEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testFromTensorSlicesWithUintDtypes(self):
  """Slicing tuples of unsigned-integer arrays preserves the uint dtypes."""
  uint_specs = (
      (np.uint8, [[0], [1]]),
      (np.uint16, [[2], [256]]),
      (np.uint32, [[4], [65536]]),
      (np.uint64, [[8], [4294967296]]),
  )
  # Each component is a (2, 2) array of the given unsigned dtype.
  components = tuple(
      np.tile(np.array(values, dtype=dtype), 2)
      for dtype, values in uint_specs)
  expected_types = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
  expected_output = [tuple(c[i] for c in components) for i in range(2)]
  dataset = dataset_ops.Dataset.from_tensor_slices(components)
  self.assertEqual(expected_types,
                   dataset_ops.get_legacy_output_types(dataset))
  self.assertDatasetProduces(dataset, expected_output)
if __name__ == "__main__":
  # Run all test cases in this module under the TensorFlow test runner.
  test.main()
| {
"content_hash": "e0b7016880bdbf6a6b1df5e091def79f",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 80,
"avg_line_length": 42.39080459770115,
"alnum_prop": 0.568239334779465,
"repo_name": "chemelnucfin/tensorflow",
"id": "17b82d6f2eed9e7569e803f4f33e131dfcc2e54b",
"size": "11753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/kernel_tests/from_tensor_slices_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
import rospy
from sensor_msgs.msg import BatteryState
from geometry_msgs.msg import Pose
from strands_navigation_msgs.msg import TopologicalMap
class DummyBattery(object):
    """
    Publishes a dummy battery message which charges or discharges based on
    the current topological node: charging while within tolerance of a
    charging-point node's pose, discharging otherwise.
    """

    def __init__(self):
        super(DummyBattery, self).__init__()
        # Names of topological nodes treated as charging stations.
        self._charging_points = rospy.get_param('~charging_points', ['ChargingPoint', 'ChargingPoint1', 'ChargingPoint2' ])
        self._charging_poses = []
        # Max per-axis distance (in map units, presumably metres) from a
        # charging pose that still counts as "at" the charger.
        self._pose_tolerance = 0.4
        self._at_charging_point = False
        # run at 10hz which matches the scitos robot
        self._rate = 10
        # battery percent per second
        self._discharge_rate = float(rospy.get_param('~discharge_rate', 0.0003)) / self._rate
        self._recharge_rate = float(rospy.get_param('~recharge_rate', 0.0030)) / self._rate
        # battery charge between 0 and 1.0
        self._current_level = 1.0
        self._get_charging_points_poses()
        self._pose_sub = rospy.Subscriber('robot_pose', Pose, self._pose_cb)
        self._battery_pub = rospy.Publisher('battery_state', BatteryState, queue_size = 1)

    def _get_charging_points_poses(self):
        # Blocks until the topological map is published once, then caches
        # the poses of all configured charging-point nodes.
        topo_map = rospy.wait_for_message("/topological_map", TopologicalMap).nodes
        for entry in topo_map:
            if entry.name in self._charging_points:
                self._charging_poses.append(entry.pose)

    def _pose_cb(self, msg):
        # Robot-pose callback: sets the at-charging-point flag using a
        # per-axis box check (not Euclidean distance) against each charger.
        for pose in self._charging_poses:
            if abs(msg.position.x - pose.position.x) < self._pose_tolerance and abs(msg.position.y - pose.position.y) < self._pose_tolerance:
                self._at_charging_point = True
                return
        self._at_charging_point = False

    def run(self):
        # Publish BatteryState at self._rate Hz until shutdown, integrating
        # the simulated charge level each tick.
        rate = rospy.Rate(self._rate)
        msg = BatteryState()
        while not rospy.is_shutdown():
            msg.header.stamp = rospy.get_rostime()
            # 'present' is reused here to mean "docked at a charger".
            msg.present = self._at_charging_point
            if msg.present:
                msg.power_supply_status = BatteryState.POWER_SUPPLY_STATUS_CHARGING
                self._current_level = min(1, self._current_level + self._recharge_rate)
            else:
                msg.power_supply_status = BatteryState.POWER_SUPPLY_STATUS_DISCHARGING
                self._current_level = max(0, self._current_level - self._discharge_rate)
            # NOTE(review): percentage is published in the 0..1 range, which
            # matches _current_level; confirm consumers expect 0..1 not 0..100.
            msg.percentage = float(self._current_level)
            self._battery_pub.publish(msg)
            rate.sleep()
if __name__ == '__main__':
    # Standard ROS node entry point: init the node, then spin the publish loop.
    rospy.init_node("dummy_battery")
    db = DummyBattery()
    db.run()
| {
"content_hash": "428452fc07cd20310afb8d86e8124aa9",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 141,
"avg_line_length": 37.53521126760563,
"alnum_prop": 0.6082551594746717,
"repo_name": "strands-project/strands_executive_behaviours",
"id": "1de4fe10d21160364ac3ca96253b9d8415a8c97d",
"size": "2688",
"binary": false,
"copies": "1",
"ref": "refs/heads/hydro-devel",
"path": "routine_behaviours/scripts/dummy_battery.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "10114"
},
{
"name": "Python",
"bytes": "64301"
}
],
"symlink_target": ""
} |
import getpass
import requests
import json
import os
import socket
import commands
import sys
# Hostname of the machine running this script; used as the key of the
# fallback (local-only) configuration entry below.
HOSTNAME = socket.gethostname()

# Fallback configuration used when the config file is absent: a single
# local Elasticsearch node on the default HTTP port.
DEFAULT_CONF = {
    HOSTNAME: {
        "nodename": HOSTNAME,
        "host": "127.0.0.1",
        "port": "9200",
    }
}
def get_monitor_es_conf(filename):
    """Load the monitored-ES node configuration from *filename*.

    Expected JSON layout::

        {
            "es0": {"host": "hostname-es0", "nodename": "es0", "port": "9200"},
            "es1": {"host": "hostname-es1", "nodename": "es1", "port": "8200"}
        }

    Returns the parsed dict, or DEFAULT_CONF (localhost:9200) when the
    file does not exist.
    """
    if os.path.exists(filename):
        # The context manager closes the file; the explicit fd.close() that
        # used to follow inside the with-block was redundant and is removed.
        with open(filename, 'r') as fd:
            return json.loads(fd.read().strip())
    else:
        return DEFAULT_CONF
def get_data(host, port, uri):
    # Fetch http://<host>:<port>/<uri> from the Elasticsearch HTTP API and
    # return the parsed JSON body.
    try:
        url = "http://%s:%s/%s" % (host,port,uri)
        response = requests.request("GET", url)
        return json.loads(response.text)
    except Exception:
        # NOTE(review): any failure (network error, non-JSON body) prints the
        # literal sentinel '1' -- presumably a Zabbix "unsupported" marker,
        # confirm against the Zabbix item config -- and aborts the script.
        print '1'
        sys.exit(1)
if __name__ == '__main__':
    monitor_es_conf = get_monitor_es_conf("/opt/etc/monitor-es.json")
    # Zabbix low-level-discovery payload; always includes the pseudo-index
    # '_all' in addition to the indices reported by the cluster.
    data = [{"{#IDX}":"_all"}]
    for node, node_conf in monitor_es_conf.items():
        #get _stats info
        cluster_stats = get_data(node_conf['host'], node_conf['port'], '_stats')
        for idx in cluster_stats['indices']:
            data.append({"{#IDX}":idx})
        #get once
        # _stats is cluster-wide, so querying a single node suffices.
        break
    print json.dumps({"data": data})
| {
"content_hash": "5df303fd71cbecbc9d9734e3c38a9701",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 80,
"avg_line_length": 24.70909090909091,
"alnum_prop": 0.5533480500367918,
"repo_name": "sapling/es",
"id": "66882aae4e89296296756cbf0a29b89cdd201a5a",
"size": "1405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zabbix/scripts/monitor-es.cluster-idx.discovery.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7396"
},
{
"name": "Shell",
"bytes": "4988"
}
],
"symlink_target": ""
} |
'''
Created on Jul 30, 2015
@author: Mikhail
'''
import unittest
import re
from json_file_generator import MyOwnJSONProcessing as json_processing
from json_file_generator import __version__ as json_file_generator_version
from unittest.case import skip, skipIf
class GenerateAndLoadJSONTestUpdateFour(unittest.TestCase):
    """Round-trip tests: dump generated data to a JSON file and load it back.

    Also demonstrates class-level fixtures (setUpClass/tearDownClass) and
    unconditional / conditional test skipping.
    """

    # Shared reference data, generated once for the whole test class.
    expected_data = {}

    @classmethod
    def setUpClass(cls):
        print "{} for {} has been called".format(cls.setUpClass.__name__, cls.__name__)
        cls.expected_data = json_processing.generate_data_for_json_obj()

    def setUp(self):
        print "{} for {} has been called".format(self.setUp.__name__, self._testMethodName)
        self.file_name = "generate_and_load_unittest.json"
        # Writes expected_data to disk; returns the actual file name used.
        self.original_name = json_processing.generate_json_file_with_data(self.file_name, self.expected_data)

    def tearDown(self):
        print "{} for {} has been called".format(self.tearDown.__name__, self._testMethodName)

    @classmethod
    def tearDownClass(cls):
        print "{} for {} has been called".format(cls.tearDownClass.__name__, cls.__name__)
        # Remove the files produced by the tests.
        json_processing.clean_up()

    def testGenerateAndLoadJSONValidKeys(self):
        # Keys must match in both directions (none missing, none extra).
        print "Processing file {}".format(self.original_name)
        actual_data = json_processing.load_data_from_json_file(self.original_name)
        for exp_key in self.expected_data.keys():
            self.assertTrue(actual_data.has_key(exp_key), "Expected key '{}' has not been found in loaded json".format(exp_key))
        for act_key in actual_data.keys():
            self.assertTrue(self.expected_data.has_key(act_key), "Loaded key '{}' has not been found in dumped json".format(act_key))

    # General version of skip
    @skip("old functionality")
    def testGenerateAndLoadJSONValidKeysHasOnlyLetters1(self):
        print "Processing file {}".format(self.original_name)
        actual_data = json_processing.load_data_from_json_file(self.original_name)
        for act_key in actual_data.keys():
            self.assertTrue(re.match("[^a-zA-Z]", act_key) is None, "Key should contains only alpha symbols: {}".format(act_key))

    # Version of skip that check version of our json_file_generator
    @skipIf(json_file_generator_version > 1, "This functionality is not supported in this version on the json file generator")
    def testGenerateAndLoadJSONValidKeysHasOnlyLetters2(self):
        print "Processing file {}".format(self.original_name)
        actual_data = json_processing.load_data_from_json_file(self.original_name)
        for act_key in actual_data.keys():
            self.assertIsNone(re.match("[^a-zA-Z]", act_key), "Key should contains only alpha symbols: {}".format(act_key))

    def testGenerateAndLoadJSONValidValues(self):
        # Values must agree for every key, checked in both directions.
        print "Processing file {}".format(self.original_name)
        actual_data = json_processing.load_data_from_json_file(self.original_name)
        for exp_key, exp_value in self.expected_data.items():
            self.assertEquals(exp_value, actual_data.get(exp_key), "Dictionaries have different values '{}' for first and '{}' for second for the same key".format(exp_value, actual_data.get(exp_key)))
        for act_key, act_value in actual_data.items():
            self.assertEquals(act_value, self.expected_data.get(act_key), "Dictionaries have different values '{}' for first and '{}' for second for the same key".format(act_value, self.expected_data.get(act_key)))

    def testGenerateAndLoadJSONForInvalidFile(self):
        """
        This test checks that valid exception will be raised if required file will not be found
        """
        invalid_name = "invalid_" + self.original_name
        print "Processing file {}".format(invalid_name)
        with self.assertRaises(IOError) as io_exception:
            # attempt to read file that doesn't exist
            json_processing.load_data_from_json_file(invalid_name)
        # errno 2 == ENOENT
        self.assertEqual(io_exception.exception.errno, 2)
        self.assertEqual(io_exception.exception.strerror, 'No such file or directory')
if __name__ == "__main__":
unittest.main(verbosity=2) | {
"content_hash": "f0792b3db54221caaeff234ee191a423",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 214,
"avg_line_length": 51.2625,
"alnum_prop": 0.6832479882955377,
"repo_name": "MikeLaptev/sandbox_python",
"id": "a3fa6ac2f7ad804220436c5db3f65457a257c432",
"size": "4101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mera/unittest_example/generate_and_load_unittest_update_four.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Nginx",
"bytes": "591"
},
{
"name": "Python",
"bytes": "190991"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
} |
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.azure_activity_log_configuration import AzureActivityLogConfiguration # noqa: E501
from wavefront_api_client.rest import ApiException
class TestAzureActivityLogConfiguration(unittest.TestCase):
    """Unit test stubs for the AzureActivityLogConfiguration model."""

    def setUp(self):
        """No fixtures are required for these stub tests."""

    def tearDown(self):
        """Nothing to clean up."""

    def testAzureActivityLogConfiguration(self):
        """Test AzureActivityLogConfiguration"""
        # FIXME: construct object with mandatory attributes with example values
        # model = wavefront_api_client.models.azure_activity_log_configuration.AzureActivityLogConfiguration()  # noqa: E501
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "9483b5401fc91a2b3e70208e6a41aa1e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 409,
"avg_line_length": 36.55263157894737,
"alnum_prop": 0.7415406767458603,
"repo_name": "wavefrontHQ/python-client",
"id": "d6bf6555df0959663b9400776502a721d8b779df",
"size": "1406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_azure_activity_log_configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4642252"
},
{
"name": "Shell",
"bytes": "3458"
}
],
"symlink_target": ""
} |
import csv
import string
import itertools
import codecs
try:
import pandas as pd
has_pandas = True
except ImportError:
has_pandas = False
from sqlalchemy import and_
from sqlalchemy.exc import SQLAlchemyError
from . import util
from .database import db
from .ingredient import Categories, Ingredient, display_name_mappings
from .logger import get_logger
log = get_logger(__name__)
def get_barstock_instance(csv_list, use_sql=False, bar_id=None, include_all=False):
    """ Factory for getting the right, initialized barstock

    :param csv_list: one csv path or a list of csv paths to load
    :param use_sql: force the SQL-backed implementation even when pandas
        is available
    :param bar_id: id of the active bar (required for the SQL version)
    :param include_all: keep out-of-stock rows (DataFrame version only)
    :raises ValueError: when the SQL version is requested without a bar_id
    :raises NotImplementedError: when neither backend is usable
    """
    if isinstance(csv_list, str):
        csv_list = [csv_list]
    if use_sql or not has_pandas:
        if bar_id is None:
            raise ValueError("Valid bar object required for sql barstock")
        # Bug fix: Barstock_SQL.__init__ requires bar_id; it was previously
        # constructed with no arguments, which raised a TypeError.
        barstock = Barstock_SQL(bar_id)
        barstock.load_from_csv(csv_list, bar_id)
        return barstock
    elif has_pandas:
        return Barstock_DF.load(csv_list, include_all=include_all)
    else:
        raise NotImplementedError("No pandas and not using sql version of Barstock")
def _calculated_columns(thing):
    """ Given an object with the required fields,
    calculate and add the other fields

    Works in place on any mapping-like object (a plain dict row or a pandas
    DataFrame) that already carries 'Size (mL)' and 'Price Paid'.
    """
    size_ml = thing['Size (mL)']
    price = thing['Price Paid']
    size_oz = util.convert_units(size_ml, 'mL', 'oz')
    thing['Size (oz)'] = size_oz
    thing['$/mL'] = price / size_ml
    thing['$/cL'] = price * 10 / size_ml
    thing['$/oz'] = price / size_oz
def _update_computed_fields(row):
    """ Uses clean names

    Recomputes the derived attributes (type_, Size_oz, Cost_per_*) on an
    Ingredient model instance, in place.
    """
    row.type_ = row.Type.lower()
    try:
        row.Size_oz = util.convert_units(row.Size_mL, 'mL', 'oz')
        row.Cost_per_mL = row.Price_Paid / row.Size_mL
        row.Cost_per_cL = row.Price_Paid*10 / row.Size_mL
        row.Cost_per_oz = row.Price_Paid / row.Size_oz
    except ZeroDivisionError:
        # A zero (or missing, coerced-to-zero) Size_mL leaves the cost
        # fields unset; the row is logged rather than rejected.
        log.warning("Ingredient missing size field: {}".format(row))
class DataError(Exception):
    """Raised when a barstock row cannot be stored (bad or missing fields)."""
    pass

class Barstock(object):
    """Common base marker for the barstock implementations below."""
    pass
class Barstock_SQL(Barstock):
    """SQL-backed barstock: rows live in the Ingredient table, scoped to a
    single bar via bar_id."""

    def __init__(self, bar_id):
        # Active bar; every query below is filtered to this id.
        self.bar_id = bar_id

    def load_from_csv(self, csv_list, bar_id, replace_existing=True):
        """Load the given CSVs
        if replace_existing is True, will replace the whole db for this bar
        bar_id is the active bar
        """
        if replace_existing:
            rows_deleted = Ingredient.query.filter_by(bar_id=bar_id).delete()
            db.session.commit()
            log.info("Dropped {} rows for {} table".format(rows_deleted, Ingredient.__tablename__))
        for csv_file in csv_list:
            # utf-8-sig handles the BOM, /uffef
            with open(csv_file, encoding='utf-8-sig') as fp:
                reader = csv.DictReader(fp)
                for row in reader:
                    try:
                        self.add_row(row, bar_id)
                    except DataError as e:
                        # Bad rows are logged and skipped, not fatal.
                        log.warning(e)

    def add_row(self, row, bar_id):
        """ where row is a dict of fields from the csv
        returns the Model object for the updated/inserted row
        (or None when the (Ingredient, Kind) primary key is missing)"""
        # Accepts both the current (Ingredient/Kind) and the legacy
        # (Type/Bottle) csv column headers.
        if not row.get('Ingredient', row.get('Type')) or not row.get('Kind', row.get('Bottle')):
            log.debug("Primary key (Ingredient, Kind) missing, skipping ingredient: {}".format(row))
            return
        # Map display column names to model attribute names and coerce values.
        clean_row = {display_name_mappings[k]['k'] : display_name_mappings[k]['v'](v)
                for k,v in row.items()
                if k in display_name_mappings}
        try:
            ingredient = Ingredient(bar_id=bar_id, **clean_row)
            # Upsert keyed on (bar_id, Kind, Type).
            row = Ingredient.query.filter_by(bar_id=ingredient.bar_id,
                    Kind=ingredient.Kind, Type=ingredient.Type).one_or_none()
            if row: # update
                for k, v in clean_row.items():
                    row[k] = v
                _update_computed_fields(row)
                db.session.commit()
                return row
            else: # insert
                _update_computed_fields(ingredient)
                db.session.add(ingredient)
                db.session.commit()
                return ingredient
        except SQLAlchemyError as err:
            msg = "{}: on row: {}".format(err, clean_row)
            raise DataError(msg)

    def get_all_kind_combinations(self, specifiers):
        """ For a given list of ingredient specifiers, return a list of lists
        where each list is a specific way to make the drink
        e.g. Martini passes in ['gin', 'vermouth'], gets [['Beefeater', 'Noilly Prat'], ['Knickerbocker', 'Noilly Prat']]
        """
        kind_lists = [[b.Kind for b in self.slice_on_type(i)] for i in specifiers]
        # Cartesian product of the available kinds, one axis per specifier.
        opts = itertools.product(*kind_lists)
        return opts

    def get_kind_abv(self, ingredient):
        # ABV recorded for the ingredient's specific kind.
        return self.get_kind_field(ingredient, 'ABV')

    def get_kind_category(self, ingredient):
        # Category recorded for the ingredient's specific kind.
        return self.get_kind_field(ingredient, 'Category')

    def cost_by_kind_and_volume(self, ingredient, amount, unit='oz'):
        # Cost of pouring `amount` of the ingredient, priced per `unit`.
        per_unit = self.get_kind_field(ingredient, 'Cost_per_{}'.format(unit))
        return per_unit * amount

    def get_kind_field(self, ingredient, field):
        # Single-column lookup; `field` must be a real Ingredient column.
        if field not in list(Ingredient.__table__.columns.keys()):
            raise AttributeError("get-kind-field '{}' not a valid field in the data".format(field))
        return self.get_ingredient_row(ingredient)[field]

    def get_ingredient_row(self, ingredient):
        # Resolve a fully-specified (type + kind) ingredient to exactly one
        # stored row; anything else is a data error.
        if ingredient.kind is None:
            raise ValueError("ingredient {} has no kind specified".format(ingredient.__repr__()))
        row = self.slice_on_type(ingredient)
        if len(row) > 1:
            raise ValueError('{} has multiple entries in the input data!'.format(ingredient.__repr__()))
        elif len(row) < 1:
            raise ValueError('{} has no entry in the input data!'.format(ingredient.__repr__()))
        return row[0]

    # TODO sqlqlchemy exception decorator?
    def slice_on_type(self, specifier):
        """ Return query results for rows matching an ingredient specifier
        Handles several special cases
        """
        type_ = specifier.ingredient.lower()
        if type_ in ['rum', 'whiskey', 'whisky', 'tequila', 'vermouth']:
            # Substring match so e.g. 'rum' matches 'amber rum', and both
            # whisky spellings collapse to the common stem 'whisk'.
            type_ = 'whisk' if type_ == 'whisky' else type_
            filter_ = Ingredient.type_.like('%{}%'.format(type_))
        elif type_ == 'any spirit':
            spirits = ['dry gin', 'rye whiskey', 'bourbon whiskey', 'amber rum', 'dark rum', 'white rum', 'genever', 'cognac', 'brandy', 'aquavit']
            filter_ = Ingredient.type_.in_(spirits)
        elif type_ == 'bitters':
            filter_ = Ingredient.Category == 'Bitters'
        else:
            filter_ = Ingredient.type_ == type_
        if specifier.kind:
            filter_ = and_(filter_, Ingredient.Kind == specifier.kind)
        # Always restrict to this bar's in-stock rows.
        filter_ = and_(filter_, Ingredient.bar_id == self.bar_id, Ingredient.In_Stock == True)
        return Ingredient.query.filter(filter_).all()

    def to_csv(self):
        # Dump the whole Ingredient table as naive (unquoted/unescaped) csv.
        cols = list(Ingredient.__table__.columns.keys())
        result = [','.join(cols)]
        for row in Ingredient.query.all():
            result.append(','.join([str(row[col]) for col in cols]))
        return '\n'.join(result)
class Barstock_DF(Barstock):
    """ Wrap up a csv of kind info with some helpful methods
    for data access and querying

    Backed by a pandas DataFrame (self.df) with one row per (Type, Kind).
    """
    def __init__(self, df):
        self.df = df

    def get_all_kind_combinations(self, specifiers):
        """ For a given list of ingredient specifiers, return a list of lists
        where each list is a specific way to make the drink
        e.g. Martini passes in ['gin', 'vermouth'], gets [['Beefeater', 'Noilly Prat'], ['Knickerbocker', 'Noilly Prat']]
        """
        kind_lists = [self.slice_on_type(i)['Kind'].tolist() for i in specifiers]
        # Cartesian product of the available kinds, one axis per specifier.
        opts = itertools.product(*kind_lists)
        return opts

    def get_kind_abv(self, ingredient):
        # ABV recorded for the ingredient's specific kind.
        return self.get_kind_field(ingredient, 'ABV')

    def get_kind_category(self, ingredient):
        # Category recorded for the ingredient's specific kind.
        return self.get_kind_field(ingredient, 'Category')

    def cost_by_kind_and_volume(self, ingredient, amount, unit='oz'):
        # Cost of pouring `amount` of the ingredient, priced per `unit`.
        per_unit = self.get_kind_field(ingredient, '$/{}'.format(unit))
        return per_unit * amount

    def get_kind_field(self, ingredient, field):
        """Look up a single column value for an ingredient's kind.

        :raises AttributeError: if `field` is not a column in the data
        """
        if field not in self.df.columns:
            raise AttributeError("get-kind-field '{}' not a valid field in the data".format(field))
        return self.get_ingredient_row(ingredient).at[0, field]

    def get_ingredient_row(self, ingredient):
        """Return the single-row DataFrame for a fully-specified ingredient.

        :raises ValueError: if the ingredient has no kind, or matches zero
            or multiple rows
        """
        if ingredient.kind is None:
            raise ValueError("ingredient {} has no kind specified".format(ingredient.__repr__()))
        row = self.slice_on_type(ingredient)
        if len(row) > 1:
            raise ValueError('{} has multiple entries in the input data!'.format(ingredient.__repr__()))
        elif len(row) < 1:
            raise ValueError('{} has no entry in the input data!'.format(ingredient.__repr__()))
        return row

    def slice_on_type(self, specifier):
        """Return the rows matching an ingredient specifier, with the same
        special cases as the SQL implementation (whisky spellings,
        'any spirit', 'bitters')."""
        type_ = specifier.ingredient.lower()
        if type_ in ['rum', 'whiskey', 'whisky', 'tequila', 'vermouth']:
            # Substring match so e.g. 'rum' matches 'amber rum', and both
            # whisky spellings collapse to the common stem 'whisk'.
            type_ = 'whisk' if type_ == 'whisky' else type_
            matching = self.df[self.df['type'].str.contains(type_)]
        elif type_ == 'any spirit':
            matching = self.df[self.df.type.isin(['dry gin', 'rye whiskey', 'bourbon whiskey', 'amber rum', 'dark rum', 'white rum', 'genever', 'brandy', 'aquavit'])]
            #matching = self.df[self.df['Category'] == 'Spirit']
        elif type_ == 'bitters':
            matching = self.df[self.df['Category'] == 'Bitters']
        else:
            matching = self.df[self.df['type'] == type_]
        if specifier.kind:
            # reset_index so callers can use .at[0, ...] on the result.
            return matching[matching['Kind'] == specifier.kind].reset_index(drop=True)
        else:
            return matching

    def sorted_df(self):
        # Data sorted for display.
        return self.df.sort_values(['Category','Type','Price Paid'])

    def add_row(self, row):
        """ where row is a dict """
        _calculated_columns(row)
        row = {k:[v] for k,v in row.items()}
        row = pd.DataFrame.from_dict(row)
        self.df = pd.concat([self.df, row])

    @classmethod
    def load(cls, barstock_csv, include_all=False):
        """Load one csv path or a list of csv paths into a Barstock_DF."""
        if isinstance(barstock_csv, str):
            barstock_csv = [barstock_csv]
        # TODO validate columns, merge duplicates
        df = pd.concat([pd.read_csv(filename) for filename in barstock_csv])
        df = df.drop_duplicates(['Type', 'Kind'])
        df = df.dropna(subset=['Type'])
        # convert money columns to floats
        for col in [col for col in df.columns if '$' in col or 'Price' in col]:
            # raw string avoids the invalid-escape-sequence warning for \$
            df[col] = df[col].replace(r'[\$,]', '', regex=True).astype(float)
        df = df.fillna(0)
        _calculated_columns(df)
        # Bug fix: string.lower does not exist on Python 3 (this file is
        # py3 -- see the encoding= kwarg to open() above); the vectorized
        # pandas equivalent gives the same result on a str column.
        df['type'] = df['Type'].str.lower()
        df['Category'] = pd.Categorical(df['Category'], Categories)
        # drop out of stock items
        if not include_all:
            #log debug how many dropped
            df = df[df["In Stock"] > 0]
        return cls(df)
| {
"content_hash": "ebad9ac30dc25977c6ede35f96625966",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 166,
"avg_line_length": 41.05185185185185,
"alnum_prop": 0.5930169613857813,
"repo_name": "twschum/mix-mind",
"id": "3555fadd8cfd7ab251ea74b1622ced56a253774e",
"size": "11084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mixmind/barstock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5158"
},
{
"name": "HTML",
"bytes": "51762"
},
{
"name": "JavaScript",
"bytes": "24582"
},
{
"name": "Python",
"bytes": "151829"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
from ..server_utils import SetUpPythonPath
SetUpPythonPath()
from .test_utils import ( Setup,
BuildRequest,
PathToTestFile,
StopOmniSharpServer,
WaitUntilOmniSharpServerReady )
from webtest import TestApp
from nose.tools import with_setup, eq_
from hamcrest import ( assert_that,
contains,
contains_string,
has_entries,
has_entry,
has_items,
empty,
equal_to )
from ..responses import NoDiagnosticSupport
from .. import handlers
import bottle
import httplib
from pprint import pprint
bottle.debug( True )
@with_setup( Setup )
def Diagnostics_ClangCompleter_ZeroBasedLineAndColumn_test():
  # FileReadyToParse returns clang diagnostics with 1-based line/column
  # numbers (despite the test's name): checks the location, the offending
  # range and the extent of the diagnostic's main token.
  app = TestApp( handlers.app )
  contents = """
void foo() {
  double baz = "foo";
}
// Padding to 5 lines
// Padding to 5 lines
"""
  event_data = BuildRequest( compilation_flags = ['-x', 'c++'],
                             event_name = 'FileReadyToParse',
                             contents = contents,
                             filetype = 'cpp' )

  results = app.post_json( '/event_notification', event_data ).json
  assert_that( results,
               contains(
                 has_entries( {
                   'kind': equal_to( 'ERROR' ),
                   'text': contains_string( 'cannot initialize' ),
                   # Range covers the initializer "foo" (cols 16-21).
                   'ranges': contains( has_entries( {
                     'start': has_entries( {
                       'line_num': 3,
                       'column_num': 16,
                     } ),
                     'end': has_entries( {
                       'line_num': 3,
                       'column_num': 21,
                     } ),
                   } ) ),
                   # Location points at the declared variable 'baz'.
                   'location': has_entries( {
                     'line_num': 3,
                     'column_num': 10
                   } ),
                   'location_extent': has_entries( {
                     'start': has_entries( {
                       'line_num': 3,
                       'column_num': 10,
                     } ),
                     'end': has_entries( {
                       'line_num': 3,
                       'column_num': 13,
                     } ),
                   } )
                 } ) ) )
@with_setup( Setup )
def Diagnostics_ClangCompleter_SimpleLocationExtent_test():
  # For an undeclared identifier, the diagnostic's extent spans exactly
  # the identifier itself ('baz', cols 3-6).
  app = TestApp( handlers.app )
  contents = """
void foo() {
  baz = 5;
}
// Padding to 5 lines
// Padding to 5 lines
"""
  event_data = BuildRequest( compilation_flags = ['-x', 'c++'],
                             event_name = 'FileReadyToParse',
                             contents = contents,
                             filetype = 'cpp' )

  results = app.post_json( '/event_notification', event_data ).json
  assert_that( results,
               contains(
                 has_entries( {
                   'location_extent': has_entries( {
                     'start': has_entries( {
                       'line_num': 3,
                       'column_num': 3,
                     } ),
                     'end': has_entries( {
                       'line_num': 3,
                       'column_num': 6,
                     } ),
                   } )
                 } ) ) )
@with_setup( Setup )
def Diagnostics_ClangCompleter_PragmaOnceWarningIgnored_test():
  # Parsing a header buffer (filepath '/foo.h') with '#pragma once' must not
  # surface the "#pragma once in main file" warning: no diagnostics at all.
  app = TestApp( handlers.app )
  contents = """
#pragma once
struct Foo {
  int x;
  int y;
  int c;
  int d;
};
"""
  event_data = BuildRequest( compilation_flags = ['-x', 'c++'],
                             event_name = 'FileReadyToParse',
                             contents = contents,
                             filepath = '/foo.h',
                             filetype = 'cpp' )

  response = app.post_json( '/event_notification', event_data ).json
  assert_that( response, empty() )
@with_setup( Setup )
def Diagnostics_CsCompleter_ZeroBasedLineAndColumn_test():
  # C# diagnostics from the OmniSharp server also come back with 1-based
  # positions; the test fixture Program.cs has a syntax error at line 11.
  app = TestApp( handlers.app )
  app.post_json( '/ignore_extra_conf_file',
                 { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } )
  filepath = PathToTestFile( 'testy', 'Program.cs' )
  contents = open( filepath ).read()
  # First parse event starts the OmniSharp server for the solution.
  event_data = BuildRequest( filepath = filepath,
                             filetype = 'cs',
                             contents = contents,
                             event_name = 'FileReadyToParse' )

  results = app.post_json( '/event_notification', event_data )
  WaitUntilOmniSharpServerReady( app, filepath )

  # Second parse event, once the server is ready, yields diagnostics.
  event_data = BuildRequest( filepath = filepath,
                             event_name = 'FileReadyToParse',
                             filetype = 'cs',
                             contents = contents )

  results = app.post_json( '/event_notification', event_data ).json

  assert_that( results,
               contains(
                 has_entries( {
                   'kind': equal_to( 'ERROR' ),
                   'text': contains_string(
                     "Unexpected symbol `}'', expecting identifier" ),
                   'location': has_entries( {
                     'line_num': 11,
                     'column_num': 2
                   } ),
                   'location_extent': has_entries( {
                     'start': has_entries( {
                       'line_num': 11,
                       'column_num': 2,
                     } ),
                     'end': has_entries( {
                       'line_num': 11,
                       'column_num': 2,
                     } ),
                   } )
                 } ) ) )

  StopOmniSharpServer( app, filepath )
@with_setup( Setup )
def Diagnostics_CsCompleter_MultipleSolution_test():
  # Same diagnostic check as above, but run against two projects whose
  # solution files are discovered via different layouts; the expected error
  # line differs per fixture (11 vs 10).
  app = TestApp( handlers.app )
  app.post_json( '/ignore_extra_conf_file',
                 { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } )
  filepaths = [ PathToTestFile( 'testy', 'Program.cs' ),
                PathToTestFile( 'testy-multiple-solutions',
                                'solution-named-like-folder',
                                'testy',
                                'Program.cs' ) ]
  lines = [ 11, 10 ]
  for filepath, line in zip( filepaths, lines ):
    contents = open( filepath ).read()
    # First parse event starts the OmniSharp server for this solution.
    event_data = BuildRequest( filepath = filepath,
                               filetype = 'cs',
                               contents = contents,
                               event_name = 'FileReadyToParse' )

    results = app.post_json( '/event_notification', event_data )
    WaitUntilOmniSharpServerReady( app, filepath )

    event_data = BuildRequest( filepath = filepath,
                               event_name = 'FileReadyToParse',
                               filetype = 'cs',
                               contents = contents )

    results = app.post_json( '/event_notification', event_data ).json

    assert_that( results,
                 contains(
                   has_entries( {
                     'kind': equal_to( 'ERROR' ),
                     'text': contains_string(
                       "Unexpected symbol `}'', expecting identifier" ),
                     'location': has_entries( {
                       'line_num': line,
                       'column_num': 2
                     } ),
                     'location_extent': has_entries( {
                       'start': has_entries( {
                         'line_num': line,
                         'column_num': 2,
                       } ),
                       'end': has_entries( {
                         'line_num': line,
                         'column_num': 2,
                       } ),
                     } )
                   } ) ) )

    StopOmniSharpServer( app, filepath )
@with_setup( Setup )
def GetDetailedDiagnostic_ClangCompleter_Works_test():
  # /detailed_diagnostic returns the full clang message for the diagnostic
  # on the requested line, after a parse event has populated diagnostics.
  app = TestApp( handlers.app )
  contents = """
struct Foo {
  int x // semicolon missing here!
  int y;
  int c;
  int d;
};
"""
  diag_data = BuildRequest( compilation_flags = ['-x', 'c++'],
                            line_num = 3,
                            contents = contents,
                            filetype = 'cpp' )

  event_data = diag_data.copy()
  event_data.update( {
    'event_name': 'FileReadyToParse',
  } )

  app.post_json( '/event_notification', event_data )
  results = app.post_json( '/detailed_diagnostic', diag_data ).json
  assert_that( results,
               has_entry( 'message', contains_string( "expected ';'" ) ) )
@with_setup( Setup )
def GetDetailedDiagnostic_ClangCompleter_Multiline_test():
  # A diagnostic with notes (no matching constructor) must produce a
  # multi-line detailed message; we only assert that a newline is present.
  app = TestApp( handlers.app )
  contents = """
struct Foo {
  Foo(int z) {}
};

int main() {
  Foo foo("goo");
}
"""
  diag_data = BuildRequest( compilation_flags = ['-x', 'c++'],
                            line_num = 7,
                            contents = contents,
                            filetype = 'cpp' )

  event_data = diag_data.copy()
  event_data.update( {
    'event_name': 'FileReadyToParse',
  } )

  app.post_json( '/event_notification', event_data )
  results = app.post_json( '/detailed_diagnostic', diag_data ).json
  assert_that( results,
               has_entry( 'message', contains_string( "\n" ) ) )
@with_setup( Setup )
def GetDetailedDiagnostic_CsCompleter_Works_test():
  # /detailed_diagnostic also works for the C# (OmniSharp) completer.
  app = TestApp( handlers.app )
  app.post_json( '/ignore_extra_conf_file',
                 { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } )
  filepath = PathToTestFile( 'testy', 'Program.cs' )
  contents = open( filepath ).read()
  event_data = BuildRequest( filepath = filepath,
                             filetype = 'cs',
                             contents = contents,
                             event_name = 'FileReadyToParse' )

  app.post_json( '/event_notification', event_data )
  WaitUntilOmniSharpServerReady( app, filepath )
  # Re-parse once the server is ready so diagnostics are available.
  app.post_json( '/event_notification', event_data )

  diag_data = BuildRequest( filepath = filepath,
                            filetype = 'cs',
                            contents = contents,
                            line_num = 11,
                            column_num = 2 )

  results = app.post_json( '/detailed_diagnostic', diag_data ).json
  assert_that( results,
               has_entry(
                 'message',
                 contains_string(
                   "Unexpected symbol `}'', expecting identifier" ) ) )

  StopOmniSharpServer( app, filepath )
@with_setup( Setup )
def GetDetailedDiagnostic_JediCompleter_DoesntWork_test():
  # The Python completer has no diagnostic support: the server must reply
  # with HTTP 500 and a NoDiagnosticSupport exception payload.
  app = TestApp( handlers.app )
  diag_data = BuildRequest( contents = "foo = 5",
                            line_num = 2,
                            filetype = 'python' )
  response = app.post_json( '/detailed_diagnostic',
                            diag_data,
                            expect_errors = True )

  eq_( response.status_code, httplib.INTERNAL_SERVER_ERROR )
  assert_that( response.json,
               has_entry( 'exception',
                          has_entry( 'TYPE', NoDiagnosticSupport.__name__ ) ) )
@with_setup( Setup )
def Diagnostics_ClangCompleter_FixIt_Available_test():
  # Diagnostics report whether clang offers an automatic fix-it: the switch
  # conversion error has one, the C++11-extension warning does not.
  app = TestApp( handlers.app )

  contents = open( PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) ).read()

  event_data = BuildRequest( contents = contents,
                             event_name = 'FileReadyToParse',
                             filetype = 'cpp',
                             compilation_flags = [ '-x', 'c++',
                                                   '-std=c++03',
                                                   '-Wall',
                                                   '-Wextra',
                                                   '-pedantic' ] )

  response = app.post_json( '/event_notification', event_data ).json

  pprint( response )

  assert_that( response, has_items (
    has_entries( {
      'location' : has_entries( { 'line_num': 16, 'column_num': 3 } ),
      'text': equal_to( 'switch condition type \'A\' '
                        'requires explicit conversion to \'int\''),
      'fixit_available' : True
    } ),
    has_entries( {
      'location' : has_entries( { 'line_num': 11, 'column_num': 3 } ),
      'text': equal_to('explicit conversion functions are a C++11 extension'),
      'fixit_available' : False
    } ),
  ) )
| {
"content_hash": "d0ab1cc7115c59f8b48a5c73061036ce",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 79,
"avg_line_length": 33.961852861035425,
"alnum_prop": 0.45972400513478817,
"repo_name": "akrehl/dotfiles",
"id": "301d4dd355147e0acfd127599d921375fd2b4b5f",
"size": "13202",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/tests/diagnostics_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162804"
},
{
"name": "Shell",
"bytes": "78487"
},
{
"name": "VimL",
"bytes": "49728"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    # Auto-generated migration: replaces the bespoke ``IdeaFormPage`` model
    # with a generic Wagtail ``FormPage`` and widens the
    # ``FormField.field_type`` choices (adds a "document" PDF-upload option).

    dependencies = [
        ('wagtailforms', '0003_capitalizeverbose'),
        ('wagtailcore', '0040_page_draft_title'),
        ('wagtailredirects', '0005_capitalizeverbose'),
        ('cms', '0035_auto_20171017_1459'),
    ]

    operations = [
        migrations.CreateModel(
            name='FormPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
                ('thank_you_text', wagtail.wagtailcore.fields.RichTextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        # Remove the old model's parent link first, then delete the model below.
        migrations.RemoveField(
            model_name='ideaformpage',
            name='page_ptr',
        ),
        migrations.AlterField(
            model_name='formfield',
            name='field_type',
            field=models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time'), (b'document', b'Upload PDF')], max_length=16, verbose_name=b'field type'),
        ),
        # Repoint form fields at the new FormPage model.
        migrations.AlterField(
            model_name='formfield',
            name='page',
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_fields', to='cms.FormPage'),
        ),
        migrations.DeleteModel(
            name='IdeaFormPage',
        ),
    ]
| {
"content_hash": "77942d3f712590c88dbfa8f1ad9810c7",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 438,
"avg_line_length": 42.458333333333336,
"alnum_prop": 0.5942100098135427,
"repo_name": "kingsdigitallab/kdl-django",
"id": "c87ceb65923eb7e028e8816d3d9baebf8e379b36",
"size": "2111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms/migrations/0036_auto_20171018_1155.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "69770"
},
{
"name": "HTML",
"bytes": "38338"
},
{
"name": "JavaScript",
"bytes": "15238"
},
{
"name": "Python",
"bytes": "1140999"
},
{
"name": "Shell",
"bytes": "2704"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from typing import Any
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class GlacierHook(AwsBaseHook):
    """Thin wrapper over the boto3 ``glacier`` client for Amazon Glacier."""

    def __init__(self, aws_conn_id: str = "aws_default") -> None:
        super().__init__(client_type="glacier")
        self.aws_conn_id = aws_conn_id

    def retrieve_inventory(self, vault_name: str) -> dict[str, Any]:
        """
        Kick off an Amazon Glacier inventory-retrieval job.

        :param vault_name: the Glacier vault on which job is executed
        """
        self.log.info("Retrieving inventory for vault: %s", vault_name)
        job = self.get_conn().initiate_job(
            vaultName=vault_name,
            jobParameters={'Type': 'inventory-retrieval'},
        )
        self.log.info("Initiated inventory-retrieval job for: %s", vault_name)
        self.log.info("Retrieval Job ID: %s", job["jobId"])
        return job

    def retrieve_inventory_results(self, vault_name: str, job_id: str) -> dict[str, Any]:
        """
        Fetch the output of a finished inventory-retrieval job.

        :param vault_name: the Glacier vault on which job is executed
        :param job_id: the job ID was returned by retrieve_inventory()
        """
        self.log.info("Retrieving the job results for vault: %s...", vault_name)
        return self.get_conn().get_job_output(vaultName=vault_name, jobId=job_id)

    def describe_job(self, vault_name: str, job_id: str) -> dict[str, Any]:
        """
        Report the status of a Glacier job (e.g. an inventory-retrieval job).

        :param vault_name: the Glacier vault on which job is executed
        :param job_id: the job ID was returned by retrieve_inventory()
        """
        self.log.info("Retrieving status for vault: %s and job %s", vault_name, job_id)
        status = self.get_conn().describe_job(vaultName=vault_name, jobId=job_id)
        self.log.info(
            "Job status: %s, code status: %s", status['Action'], status['StatusCode']
        )
        return status
| {
"content_hash": "201cb48b7fa83eaaecb0b87087fb3b3d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 100,
"avg_line_length": 42.82,
"alnum_prop": 0.6492293320878094,
"repo_name": "cfei18/incubator-airflow",
"id": "4f68559d46f03623fbf57c0a254519c3d1f1f78e",
"size": "2928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/amazon/aws/hooks/glacier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
import pandas as pd
import pyproj
import pytest
import geopandas._compat as compat
from shapely.geometry import Point
import numpy as np
from geopandas import GeoDataFrame, GeoSeries
# Two distinct CRS fixtures so tests can tell columns apart by CRS:
# EPSG:27700 (British National Grid) vs EPSG:4326 (WGS84 lat/lon).
crs_osgb = pyproj.CRS(27700)
crs_wgs = pyproj.CRS(4326)

# Number of rows in the point fixtures below.
N = 10
@pytest.fixture(params=["geometry", "point"])
def df(request):
    # Parametrized so every test runs twice: once with the active geometry
    # column named "geometry" and once named "point" (non-default name).
    geo_name = request.param
    df = GeoDataFrame(
        [
            {
                "value1": x + y,
                "value2": x * y,
                geo_name: Point(x, y),  # rename this col in tests
            }
            for x, y in zip(range(N), range(N))
        ],
        crs=crs_wgs,
        geometry=geo_name,
    )
    # want geometry2 to be a GeoSeries not Series, test behaviour of non geom col
    df["geometry2"] = df[geo_name].set_crs(crs_osgb, allow_override=True)
    return df
@pytest.fixture
def df2():
    """Frame with three geometry columns plus value columns.

    Used by the constructor_sliced tests below.
    """
    return GeoDataFrame(
        {
            "geometry": GeoSeries([Point(x, x) for x in range(3)]),
            "geometry2": GeoSeries([Point(x, x) for x in range(3)]),
            "geometry3": GeoSeries([Point(x, x) for x in range(3)]),
            "value": [1, 2, 1],
            "value_nan": np.nan,
        }
    )
def _check_metadata_gdf(gdf, geo_name="geometry", crs=crs_wgs):
    """Assert the GeoDataFrame kept its active geometry column name and CRS."""
    observed = (gdf._geometry_column_name, gdf.geometry.name, gdf.crs)
    assert observed == (geo_name, geo_name, crs)
def _check_metadata_gs(gs, name="geometry", crs=crs_wgs):
    """Assert the GeoSeries kept its name and CRS."""
    assert (gs.name, gs.crs) == (name, crs)
def assert_object(
    result, expected_type, geo_name="geometry", crs=crs_wgs, check_none_name=False
):
    """
    Helper method to make tests easier to read. Checks result is of the expected
    type. If result is a GeoDataFrame or GeoSeries, checks geo_name
    and crs match. If geo_name is None, then we expect a GeoDataFrame
    where the geometry column is invalid/ isn't set. This is never desirable,
    but is a reality of this first stage of implementation.
    """
    assert type(result) is expected_type

    if expected_type == GeoDataFrame:
        if geo_name is not None:
            _check_metadata_gdf(result, geo_name=geo_name, crs=crs)
        else:
            if check_none_name:  # TODO this is awkward
                assert result._geometry_column_name is None
            # The expected error message differs depending on whether the
            # active geometry column is unset or merely missing from the frame.
            if result._geometry_column_name is None:
                msg = (
                    "You are calling a geospatial method on the GeoDataFrame, "
                    "but the active"
                )
            else:
                msg = (
                    "You are calling a geospatial method on the GeoDataFrame, but "
                    r"the active geometry column \("
                    rf"'{result._geometry_column_name}'\) is not present"
                )
            # Accessing .geometry must raise, proving the active column is invalid.
            with pytest.raises(AttributeError, match=msg):
                result.geometry.name  # be explicit that geometry is invalid here
    elif expected_type == GeoSeries:
        _check_metadata_gs(result, name=geo_name, crs=crs)
def test_getitem(df):
    """List selection keeps GeoDataFrame only when geometry columns remain;
    scalar selection yields a (Geo)Series."""
    geo_name = df.geometry.name

    # frame-returning selections: (columns, expected type, active geometry)
    for cols, expected, active in [
        (["value1", "value2"], pd.DataFrame, None),
        ([geo_name, "geometry2"], GeoDataFrame, geo_name),
        ([geo_name], GeoDataFrame, geo_name),
        (["geometry2", "value1"], GeoDataFrame, None),
        (["geometry2"], GeoDataFrame, None),
        (["value1"], pd.DataFrame, None),
    ]:
        assert_object(df[cols], expected, active)

    # scalar column access
    assert_object(df[geo_name], GeoSeries, geo_name)
    assert_object(df["geometry2"], GeoSeries, "geometry2", crs=crs_osgb)
    assert_object(df["value1"], pd.Series)
def test_loc(df):
    """.loc column selection mirrors __getitem__ metadata behaviour."""
    geo_name = df.geometry.name

    # (column list, expected type, active geometry column)
    for cols, expected, active in [
        (["value1", "value2"], pd.DataFrame, None),
        ([geo_name, "geometry2"], GeoDataFrame, geo_name),
        ([geo_name], GeoDataFrame, geo_name),
        (["geometry2", "value1"], GeoDataFrame, None),
        (["geometry2"], GeoDataFrame, None),
        (["value1"], pd.DataFrame, None),
    ]:
        assert_object(df.loc[:, cols], expected, active)

    # scalar column access returns a (Geo)Series
    assert_object(df.loc[:, geo_name], GeoSeries, geo_name)
    assert_object(df.loc[:, "geometry2"], GeoSeries, "geometry2", crs=crs_osgb)
    assert_object(df.loc[:, "value1"], pd.Series)
def test_iloc(df):
    """.iloc column selection mirrors __getitem__ metadata behaviour.

    Column order is value1, value2, <geometry>, geometry2 (positions 0-3).
    """
    geo_name = df.geometry.name

    # (positional indexer, expected type, active geometry column)
    for indexer, expected, active in [
        (slice(0, 2), pd.DataFrame, None),
        (slice(2, 4), GeoDataFrame, geo_name),
        ([2], GeoDataFrame, geo_name),
        ([3, 0], GeoDataFrame, None),
        ([3], GeoDataFrame, None),
        ([0], pd.DataFrame, None),
    ]:
        assert_object(df.iloc[:, indexer], expected, active)

    # scalar position returns a (Geo)Series
    assert_object(df.iloc[:, 2], GeoSeries, geo_name)
    assert_object(df.iloc[:, 3], GeoSeries, "geometry2", crs=crs_osgb)
    assert_object(df.iloc[:, 0], pd.Series)
def test_squeeze(df):
    """Squeezing a single-geometry-column frame yields a GeoSeries."""
    geo_name = df.geometry.name

    squeezed_active = df[[geo_name]].squeeze()
    assert_object(squeezed_active, GeoSeries, geo_name)

    squeezed_secondary = df[["geometry2"]].squeeze()
    assert_object(squeezed_secondary, GeoSeries, "geometry2", crs=crs_osgb)
def test_to_frame(df):
    """GeoSeries.to_frame keeps geometry metadata; Series.to_frame stays pandas."""
    geo_name = df.geometry.name

    assert_object(
        df[geo_name].to_frame(), GeoDataFrame, geo_name, crs=df[geo_name].crs
    )
    assert_object(df["geometry2"].to_frame(), GeoDataFrame, "geometry2", crs=crs_osgb)
    assert_object(df["value1"].to_frame(), pd.DataFrame)
def test_reindex(df):
    """Reindexing columns follows the same rules as column selection;
    reindexing rows always preserves the GeoDataFrame."""
    geo_name = df.geometry.name
    assert_object(df.reindex(columns=["value1", "value2"]), pd.DataFrame)
    assert_object(df.reindex(columns=[geo_name, "geometry2"]), GeoDataFrame, geo_name)
    assert_object(df.reindex(columns=[geo_name]), GeoDataFrame, geo_name)
    assert_object(df.reindex(columns=["new_col", geo_name]), GeoDataFrame, geo_name)
    assert_object(df.reindex(columns=["geometry2", "value1"]), GeoDataFrame, None)
    assert_object(df.reindex(columns=["geometry2"]), GeoDataFrame, None)
    assert_object(df.reindex(columns=["value1"]), pd.DataFrame)

    # reindexing the rows always preserves the GeoDataFrame
    assert_object(df.reindex(index=[0, 1, 20]), GeoDataFrame, geo_name)

    # reindexing both rows and columns
    assert_object(
        df.reindex(index=[0, 1, 20], columns=[geo_name]), GeoDataFrame, geo_name
    )
    assert_object(df.reindex(index=[0, 1, 20], columns=["value1"]), pd.DataFrame)
def test_drop(df):
    """drop() demotes to a plain DataFrame only when all geometry columns go."""
    geo_name = df.geometry.name

    # (columns dropped, expected type, remaining active geometry column)
    for dropped, expected, active in [
        ([geo_name, "geometry2"], pd.DataFrame, None),
        (["value1", "value2"], GeoDataFrame, geo_name),
        (["value1", "value2", "geometry2"], GeoDataFrame, geo_name),
        ([geo_name, "value2"], GeoDataFrame, None),
        (["value1", "value2", geo_name], GeoDataFrame, None),
        (["geometry2", "value2", geo_name], pd.DataFrame, None),
    ]:
        assert_object(df.drop(columns=dropped), expected, active)
def test_apply(df):
    """apply() with an identity function should preserve (or drop) geometry
    metadata exactly like the equivalent column selection."""
    geo_name = df.geometry.name

    def identity(x):
        return x

    # axis = 0
    assert_object(df[["value1", "value2"]].apply(identity), pd.DataFrame)
    assert_object(df[[geo_name, "geometry2"]].apply(identity), GeoDataFrame, geo_name)
    assert_object(df[[geo_name]].apply(identity), GeoDataFrame, geo_name)
    assert_object(df[["geometry2", "value1"]].apply(identity), GeoDataFrame, None, None)
    assert_object(df[["geometry2"]].apply(identity), GeoDataFrame, None, None)
    assert_object(df[["value1"]].apply(identity), pd.DataFrame)

    # axis = 0, Series
    assert_object(df[geo_name].apply(identity), GeoSeries, geo_name)
    assert_object(df["geometry2"].apply(identity), GeoSeries, "geometry2", crs=crs_osgb)
    assert_object(df["value1"].apply(identity), pd.Series)

    # axis = 0, Series, no longer geometry: mapping to str demotes to pd.Series
    assert_object(df[geo_name].apply(lambda x: str(x)), pd.Series)
    assert_object(df["geometry2"].apply(lambda x: str(x)), pd.Series)

    # axis = 1
    assert_object(df[["value1", "value2"]].apply(identity, axis=1), pd.DataFrame)
    assert_object(
        df[[geo_name, "geometry2"]].apply(identity, axis=1), GeoDataFrame, geo_name
    )
    assert_object(df[[geo_name]].apply(identity, axis=1), GeoDataFrame, geo_name)

    # TODO below should be a GeoDataFrame to be consistent with new getitem logic
    # leave as follow up as quite complicated
    # FrameColumnApply.series_generator returns object dtypes Series, so will have
    # patch result of apply
    assert_object(df[["geometry2", "value1"]].apply(identity, axis=1), pd.DataFrame)
    assert_object(df[["value1"]].apply(identity, axis=1), pd.DataFrame)
@pytest.mark.xfail(not compat.PANDAS_GE_11, reason="apply is different in pandas 1.0.5")
def test_apply_axis1_secondary_geo_cols(df):
    """Row-wise apply over only a secondary geometry column keeps GeoDataFrame
    (with no valid active geometry)."""
    # note #GH2436 would also fix this
    def identity(x):
        return x

    assert_object(df[["geometry2"]].apply(identity, axis=1), GeoDataFrame, None, None)
def test_expanddim_in_apply():
    """Expanding each geometry to a row of coordinates must yield a plain
    pandas DataFrame, not a GeoDataFrame."""
    # https://github.com/geopandas/geopandas/pull/2296#issuecomment-1021966443
    points = GeoSeries.from_xy([0, 1], [0, 1])

    def as_coord_row(geom):
        return pd.Series([geom.x, geom.y])

    assert_object(points.apply(as_coord_row), pd.DataFrame)
@pytest.mark.xfail(
    not compat.PANDAS_GE_11,
    reason="pandas <1.1 don't preserve subclass through groupby ops",  # Pandas GH33884
)
def test_expandim_in_groupby_aggregate_multiple_funcs():
    """Aggregating with multiple funcs (one geometry-returning) yields a
    GeoDataFrame without a valid active geometry column."""
    # https://github.com/geopandas/geopandas/pull/2296#issuecomment-1021966443
    # There are two calls to _constructor_expanddim here
    # SeriesGroupBy._aggregate_multiple_funcs() and
    # SeriesGroupBy._wrap_series_output() len(output) > 1
    s = GeoSeries.from_xy([0, 1, 2], [0, 1, 3])

    def union(s):
        return s.unary_union

    def total_area(s):
        return s.area.sum()

    grouped = s.groupby([0, 1, 0])
    agg = grouped.agg([total_area, union])
    assert_object(agg, GeoDataFrame, None, None, check_none_name=True)
    result = grouped.agg([union, total_area])
    assert_object(result, GeoDataFrame, None, None, check_none_name=True)
    # only scalar aggregations -> plain DataFrame
    assert_object(grouped.agg([total_area, total_area]), pd.DataFrame)
    assert_object(grouped.agg([total_area]), pd.DataFrame)
@pytest.mark.xfail(
    not compat.PANDAS_GE_11,
    reason="pandas <1.1 uses concat([Series]) in unstack",  # Pandas GH33356
)
def test_expanddim_in_unstack():
    """Unstacking a MultiIndexed GeoSeries produces a GeoDataFrame whose
    active geometry column depends on the pandas version."""
    # https://github.com/geopandas/geopandas/pull/2296#issuecomment-1021966443
    s = GeoSeries.from_xy(
        [0, 1, 2],
        [0, 1, 3],
        index=pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "a")]),
    )
    unstack = s.unstack()
    assert_object(unstack, GeoDataFrame, None, None, False)

    if compat.PANDAS_GE_12:
        assert unstack._geometry_column_name is None
    else:  # pandas GH37369, unstack doesn't call finalize
        assert unstack._geometry_column_name == "geometry"

    # https://github.com/geopandas/geopandas/issues/2486
    s.name = "geometry"
    unstack = s.unstack()
    assert_object(unstack, GeoDataFrame, None, None)
# indexing / constructor_sliced tests

# Column combinations exercised by the parametrized slicing tests below;
# each set mixes active/secondary geometry columns with value columns.
test_case_column_sets = [
    ["geometry"],
    ["geometry2"],
    ["geometry", "geometry2"],
    # non active geo col case
    ["geometry", "value"],
    ["geometry", "value_nan"],
    ["geometry2", "value"],
    ["geometry2", "value_nan"],
]
@pytest.mark.parametrize(
    "column_set",
    test_case_column_sets,
    ids=[", ".join(i) for i in test_case_column_sets],
)
def test_constructor_sliced_row_slices(df2, column_set):
    """A row slice of a GeoDataFrame must be a plain pd.Series whose
    geometry entry (if any) is a scalar geometry, not a Series."""
    # https://github.com/geopandas/geopandas/issues/2282
    df_subset = df2[column_set]
    assert isinstance(df_subset, GeoDataFrame)
    res = df_subset.loc[0]
    # row slices shouldn't be GeoSeries, even if they have a geometry col
    assert type(res) == pd.Series
    if "geometry" in column_set:
        assert not isinstance(res.geometry, pd.Series)
        assert res.geometry == Point(0, 0)
def test_constructor_sliced_column_slices(df2):
    """iloc column slices of geometry dtype come back as GeoSeries,
    while iloc row slices stay pd.Series."""
    # Note loc doesn't use _constructor_sliced so it's not tested here
    geo_idx = df2.columns.get_loc("geometry")
    sub = df2.head(1)
    # column slices should be GeoSeries if of geometry type
    assert type(sub.iloc[:, geo_idx]) == GeoSeries
    assert type(sub.iloc[[0], geo_idx]) == GeoSeries

    sub = df2.head(2)
    assert type(sub.iloc[:, geo_idx]) == GeoSeries
    assert type(sub.iloc[[0, 1], geo_idx]) == GeoSeries

    # check iloc row slices are pd.Series instead
    assert type(df2.iloc[0, :]) == pd.Series
def test_constructor_sliced_in_pandas_methods(df2):
    """Reductions over a GeoDataFrame return plain pd.Series objects."""
    # constructor sliced is used in many places, checking a sample of non
    # geometry cases are sensible
    assert type(df2.count()) == pd.Series
    # drop the secondary geometry columns as not hashable
    hashable_test_df = df2.drop(columns=["geometry2", "geometry3"])
    assert type(hashable_test_df.duplicated()) == pd.Series
    assert type(df2.quantile()) == pd.Series
    assert type(df2.memory_usage()) == pd.Series
| {
"content_hash": "bcf520bc2ccaba659245f868ecc8a8c9",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 88,
"avg_line_length": 36.6875,
"alnum_prop": 0.6477466315626452,
"repo_name": "geopandas/geopandas",
"id": "5a447f3983ba04d27715d3f314aab69d605fbfbf",
"size": "12914",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "geopandas/tests/test_op_output_types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "32111"
},
{
"name": "Python",
"bytes": "1304336"
},
{
"name": "Shell",
"bytes": "754"
}
],
"symlink_target": ""
} |
from cobra.utils import normalize
def norm(data):
    """Test helper: shorthand for cobra.utils.normalize."""
    return normalize(data)
| {
"content_hash": "1f21970c3a157890e79ec2225bf766c5",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 33,
"avg_line_length": 19.5,
"alnum_prop": 0.7564102564102564,
"repo_name": "niwinz/cobrascript",
"id": "1c75ba48ae7018192a5f6740f29aabe6961aa8fd",
"size": "103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "74768"
}
],
"symlink_target": ""
} |
import os
from . import app, STATIC_URL
from . import gist
from flask import render_template, redirect
@app.route('/')
def homepage():
    # The landing page simply shows a fixed example gist.
    return render_gist('b5807b9c969cef7420e0e6d4884aafd3')
@app.route('/impressum')
def render_impressum():
    # Static legal-notice page.
    return render_template('impressum.html')
@app.route('/<hash:id>')
def render_gist(id):
    # Permanent redirect of a gist hash to the gist on GitHub.
    # NOTE: ``id`` shadows the builtin, but renaming would change the route
    # converter binding, so it is kept as-is.
    return redirect("https://gist.github.com/" + id, code=301)
| {
"content_hash": "59142a9809c492750e7a344e2f965828",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.7114427860696517,
"repo_name": "x3ro/draft.sx",
"id": "5eadf935aa7b545871bd33f8a43370e5d865edc8",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "draft/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23852"
},
{
"name": "HTML",
"bytes": "10223"
},
{
"name": "Makefile",
"bytes": "249"
},
{
"name": "Python",
"bytes": "7868"
},
{
"name": "Ruby",
"bytes": "925"
},
{
"name": "Shell",
"bytes": "276"
}
],
"symlink_target": ""
} |
import sys
import tensorflow as tf
import numpy as np
from numpy import genfromtxt
import requests
import csv
from sklearn import datasets
from sklearn.cross_validation import train_test_split
import sklearn
from scipy import stats
import getopt
from StringIO import StringIO
import requests
# Convert to one hot
def convertOneHot(data):
    """Extract class labels (first column of each row) and one-hot encode them.

    Returns ``(labels, onehot)`` where ``labels`` is an int numpy array and
    ``onehot`` is a list of lists whose width is ``labels.max() + 1``.
    """
    labels = np.array([int(row[0]) for row in data])
    # width is computed lazily inside the comprehension so empty input
    # never triggers labels.max()
    encoded = [[0] * (labels.max() + 1) for _ in labels]
    for position, label in enumerate(labels):
        encoded[position][label] = 1
    return (labels, encoded)
# find most common element
def mode(arr):
    """Return the first element with the highest occurrence count, or None
    when nothing repeats (every count is 1)."""
    top = max(arr.count(item) for item in arr)
    if top <= 1:
        return None
    for candidate in arr:
        if arr.count(candidate) == top:
            return candidate
def main():
    """Train a one-layer softmax classifier (Python 2 / TF <1.0 API) on the
    CSV blobs passed as argv[1] (train) and argv[2] (test), then print the
    most common predicted gesture ID."""
    # get data from arguments
    train=str(sys.argv[1]);
    test=str(sys.argv[2]);

    # NOTE(review): the second replace rewrites every literal 'n' character,
    # not an escape sequence; this only makes sense if the caller passes
    # backslash-n separated rows — TODO confirm the intended input format.
    train = train.replace('\n',' \r\n')
    train = train.replace('n',' \r\n')
    test = test.replace('\n',' \r\n')
    test = test.replace('n',' \r\n')
    #print train
    #print test
    data = genfromtxt(StringIO(train),delimiter=',') # Training data
    test_data = genfromtxt(StringIO(test),delimiter=',') # Test data
    #print data
    #print test_data

    # First column is the label; the rest are features.
    x_train=np.array([ i[1::] for i in data])
    y_train,y_train_onehot = convertOneHot(data)

    x_test=np.array([ i[1::] for i in test_data])
    y_test,y_test_onehot = convertOneHot(test_data)

    # A number of features, 5 in this case (one per finger)
    # B = number of gesture possibilities
    A=data.shape[1]-1 # Number of features, Note first is y
    B=len(y_train_onehot[0])

    tf_in = tf.placeholder("float", [None, A]) # Features
    tf_weight = tf.Variable(tf.zeros([A,B]))
    tf_bias = tf.Variable(tf.zeros([B]))
    tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)

    # Training via backpropagation
    tf_softmax_correct = tf.placeholder("float", [None,B])
    tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))

    # Train using tf.train.GradientDescentOptimizer
    tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)

    # Add accuracy checking nodes
    tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
    tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))

    # Initialize and run
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    #print("...")
    # Run the training (6 full-batch gradient steps)
    for i in range(6):
        sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
        #calculate accuracy from test data
        #result = sess.run(tf_accuracy, feed_dict={tf_in: x_test, tf_softmax_correct: y_test_onehot})
        #print "Run {},{}".format(i,result)

    #make Prediction after training
    prediction=tf.argmax(tf_softmax,1)
    guess = prediction.eval(feed_dict={tf_in: x_test}, session=sess)

    # calculate most common gesture ID (scipy.stats.mode, not the local mode())
    print int(stats.mode(guess)[0][0])
    #r = requests.post("http://localhost:3000/api/receiveAnswer", data = {"prediction": int(stats.mode(guess)[0][0])})
    return 0
# Script entry point: expects the train/test CSV payloads as argv[1]/argv[2].
if __name__ == "__main__":
    main()
| {
"content_hash": "ca4bda8068e33fda72ee49d5211cdae2",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 117,
"avg_line_length": 32.52127659574468,
"alnum_prop": 0.6630683676807327,
"repo_name": "yuriyminin/leap-gesture",
"id": "7619107659311a4e2b5ab5618bd7b06b7d120f73",
"size": "3079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "machineLearning.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "385764"
},
{
"name": "HTML",
"bytes": "28789"
},
{
"name": "JavaScript",
"bytes": "43334"
},
{
"name": "Python",
"bytes": "3079"
}
],
"symlink_target": ""
} |
import sys
def print_obj(x, fline=""):
    """Recursively pretty-print a Django model instance field by field.

    ``fline`` is the indentation prefix accumulated while descending into
    reverse relations.
    """
    for field in x._meta.get_fields():
        if field.is_relation:
            # Fields without get_accessor_name() are forward relations.
            if not hasattr(field, "get_accessor_name"):
                if getattr(field, 'many_to_many'):
                    # Many-to-many: list each related entry, 1-based.
                    m2m_field = getattr(x, field.name)
                    print(f"{fline}{field.name}:")
                    flineind = fline + '\t'
                    for i, ent in enumerate(getattr(m2m_field, "all")(), 1):
                        print(f"{flineind}#{i}: {ent}")
                else:
                    # Foreign key: print only the related object's id.
                    print(
                        f"{fline}{field.name}.ForeignKeyID: "
                        f"{getattr(getattr(x, field.name),'id')}"
                    )
            else:
                # Reverse relation: recurse into every related object.
                accessor = field.get_accessor_name()
                relobj = getattr(x, accessor)
                # NOTE(review): this header ignores ``fline``, so nested
                # reverse-relation headers print unindented — confirm intended.
                print(f"{accessor}:")
                for f in relobj.all():
                    print_obj(f, fline + "\t")
        else:
            # Plain field: name and value on one line.
            print(f"{fline}{field.name}: {getattr(x, field.name)}")
def updt(total, progress):
    """
    Displays or updates a console progress bar.

    Original source: https://stackoverflow.com/a/15860757/1391441
    This version is from stack overflow user Gabriel:
    https://stackoverflow.com/users/1391441/gabriel
    """
    bar_len = 20
    frac = float(progress) / float(total)
    suffix = ""
    if frac >= 1.0:
        # Completed: clamp to 100% and terminate the line.
        frac, suffix = 1, "\r\n"
    filled = int(round(bar_len * frac))
    bar = "#" * filled + "-" * (bar_len - filled)
    sys.stdout.write(f"\r[{bar}] {round(frac * 100, 2):.2f}% {suffix}")
    sys.stdout.flush()
| {
"content_hash": "12b143b043834e7fcb8f3483d5f9115d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 76,
"avg_line_length": 35.95652173913044,
"alnum_prop": 0.4957678355501814,
"repo_name": "Crimson-Star-Software/data-combine",
"id": "0c50a7fd6249ddcccd5a4c2819484e369888dbdc",
"size": "1654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datacombine/datacombine/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3066"
},
{
"name": "HTML",
"bytes": "10885"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "91046"
},
{
"name": "Shell",
"bytes": "1509"
}
],
"symlink_target": ""
} |
import json
import os
import yaml
import click
import dateutil.parser
from dateutil import tz
from canvas_data.api import CanvasDataAPI
from canvas_data.ddl_utils import ddl_from_json
class HyphenUnderscoreAliasedGroup(click.Group):
    """Click group that also accepts ``some_command`` for ``some-command``."""

    def get_command(self, ctx, cmd_name):
        # try to find the command as typed
        rv = click.Group.get_command(self, ctx, cmd_name)
        if rv is not None:
            return rv

        # try to find the command with underscores replaced with hyphens
        underscore_cmd_name = cmd_name.replace(u'_', u'-')
        rv = click.Group.get_command(self, ctx, underscore_cmd_name)
        return rv
@click.group(cls=HyphenUnderscoreAliasedGroup)
@click.option('-c', '--config', type=click.File('r'), envvar='CANVAS_DATA_CONFIG')
@click.option('--api-key', envvar='CANVAS_DATA_API_KEY')
@click.option('--api-secret', envvar='CANVAS_DATA_API_SECRET')
@click.pass_context
def cli(ctx, config, api_key, api_secret):
    """A command-line tool to work with Canvas Data. Command-specific help
    is available at: canvas-data COMMAND --help"""
    # if a config file was specified, read settings from that
    if config:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects from a malicious config file; prefer
        # yaml.safe_load here.
        ctx.obj = yaml.load(config)
    else:
        ctx.obj = {}

    # if options were passed in, use them, possibly overriding config file settings
    if api_key:
        ctx.obj['api_key'] = api_key
    if api_secret:
        ctx.obj['api_secret'] = api_secret
@cli.command(name='get-schema')
@click.option('--version', default='latest')
@click.pass_context
def get_schema(ctx, version):
    """Gets a particular version of the Canvas Data schema (latest by default) and outputs as JSON"""
    client = CanvasDataAPI(api_key=ctx.obj.get('api_key'),
                           api_secret=ctx.obj.get('api_secret'))
    schema = client.get_schema(version, key_on_tablenames=True)
    # Pretty-printed, key-sorted JSON so output is diffable between versions.
    click.echo(json.dumps(schema, sort_keys=True, indent=4))
@cli.command(name='get-ddl')
@click.option('--version', default='latest')
@click.pass_context
def get_ddl(ctx, version):
    """Gets DDL for a particular version of the Canvas Data schema (latest by default)"""
    client = CanvasDataAPI(api_key=ctx.obj.get('api_key'),
                           api_secret=ctx.obj.get('api_secret'))
    schema = client.get_schema(version, key_on_tablenames=True)
    create_stmts, drop_stmts = ddl_from_json(schema)
    # Emit DROP statements before CREATEs, one terminated statement per line.
    for stmt in drop_stmts:
        click.echo(f'{stmt};')
    for stmt in create_stmts:
        click.echo(f'{stmt};')
@cli.command(name='list-dumps')
@click.pass_context
def list_dumps(ctx):
    """Lists available dumps"""
    cd = CanvasDataAPI(
        api_key=ctx.obj.get('api_key'),
        api_secret=ctx.obj.get('api_secret')
    )
    dumps = cd.get_dumps()
    for d in dumps:
        # Convert the dump's creation timestamp to the local timezone.
        create_date = dateutil.parser.parse(d['createdAt'])
        localtz = tz.tzlocal()
        create_date = create_date.astimezone(localtz)
        detail_str = '{}\tsequence: {}\tfiles: {}\tschema: {}\tid: {}'.format(create_date, d['sequence'], d['numFiles'], d['schemaVersion'], d['dumpId'])
        # Highlight dumps with fewer than 60 files — presumably flagging
        # incomplete dumps; the threshold's origin is undocumented (TODO).
        if d['numFiles'] < 60:
            click.secho(detail_str, bg='blue', fg='white')
        else:
            click.echo(detail_str)
@cli.command(name='get-dump-files')
@click.option('--dump-id', default='latest', help='get files for this dump (defaults to the latest dump)')
@click.option('--download-dir', default=None, type=click.Path(), help='store downloaded files in this directory')
@click.option('--table', default=None, help='(optional) only get the files for a particular table')
@click.option('--force', is_flag=True, default=False, help='re-download files even if they already exist (default False)')
@click.pass_context
def get_dump_files(ctx, dump_id, download_dir, table, force):
    """Downloads the Canvas Data files for a particular dump. Can be optionally limited to a single table."""
    # CLI options override any config-file values already present in ctx.obj.
    if download_dir:
        ctx.obj['download_dir'] = download_dir
    if table:
        ctx.obj['table'] = table

    cd = CanvasDataAPI(
        api_key=ctx.obj.get('api_key'),
        api_secret=ctx.obj.get('api_secret')
    )

    # BUG FIX: was ``dump_id is 'latest'`` — identity comparison against a
    # string literal is implementation-dependent (SyntaxWarning on 3.8+);
    # equality is the correct check.
    if dump_id == 'latest':
        dump_id = cd.get_latest_regular_dump()

    # first, get the dump details so we can extract the list of fragment files to download
    dump_files = []
    dump_details = cd.get_file_urls(dump_id=dump_id)
    if ctx.obj.get('table'):
        dump_files.extend(dump_details['artifactsByTable'][ctx.obj['table']]['files'])
    else:
        for k, v in dump_details['artifactsByTable'].items():
            if k == 'requests':
                # the 'requests' artifact is deliberately skipped
                continue
            dump_files.extend(v['files'])

    filenames = []
    progress_label = '{: <23}'.format('Downloading {} files'.format(len(dump_files)))
    with click.progressbar(dump_files, label=progress_label) as file_list:
        for f in file_list:
            filenames.append(cd.get_file(file=f, download_directory=ctx.obj['download_dir'], force=force))

    click.echo('Done.')
@cli.command(name='unpack-dump-files')
@click.option('--dump-id', default='latest', help='get files for this dump (defaults to the latest dump)')
@click.option('--download-dir', default=None, type=click.Path(), help='store downloaded files in this directory')
@click.option('--data-dir', default=None, type=click.Path(), help='store unpacked files in this directory')
@click.option('-t', '--table', default=None, help='(optional) only get the files for a particular table')
@click.option('--force', is_flag=True, default=False, help='re-download/re-unpack files even if they already exist (default False)')
@click.pass_context
def unpack_dump_files(ctx, dump_id, download_dir, data_dir, table, force):
    """
    Downloads, uncompresses and re-assembles the Canvas Data files for a dump. Can be
    optionally limited to a single table.
    """
    # CLI options override any config-file values already present in ctx.obj.
    if download_dir:
        ctx.obj['download_dir'] = download_dir
    if data_dir:
        ctx.obj['data_dir'] = data_dir
    if table:
        ctx.obj['table'] = table

    cd = CanvasDataAPI(
        api_key=ctx.obj.get('api_key'),
        api_secret=ctx.obj.get('api_secret')
    )

    # BUG FIX: was ``dump_id is 'latest'`` — identity comparison against a
    # string literal is implementation-dependent (SyntaxWarning on 3.8+);
    # equality is the correct check.
    if dump_id == 'latest':
        dump_id = cd.get_latest_regular_dump()

    # first make sure all of the files are downloaded
    ctx.invoke(get_dump_files, dump_id=dump_id, download_dir=ctx.obj['download_dir'], table=ctx.obj.get('table'), force=force)

    dump_details = cd.get_file_urls(dump_id=dump_id)
    sequence = dump_details['sequence']

    table_names = []
    if ctx.obj.get('table'):
        table_names.append(ctx.obj['table'])
    else:
        table_names.extend(dump_details['artifactsByTable'].keys())
        # the 'requests' artifact is deliberately excluded from unpacking
        table_names.remove('requests')

    data_file_names = []
    progress_label = '{: <23}'.format('Unpacking {} tables'.format(len(table_names)))

    # store the data files in dump-specific subdirectory named after the sequence
    dump_data_dir = os.path.join(ctx.obj['data_dir'], str(sequence))

    with click.progressbar(table_names, label=progress_label) as tnames:
        for t in tnames:
            data_file_names.append(cd.get_data_for_table(table_name=t,
                                                         dump_id=dump_id,
                                                         download_directory=ctx.obj['download_dir'],
                                                         data_directory=dump_data_dir,
                                                         force=force))

    # Write a SQL script that reloads the unpacked files into a database.
    if ctx.obj.get('table'):
        reload_script = 'reload_{}.sql'.format(ctx.obj['table'])
    else:
        reload_script = 'reload_all.sql'

    with open(os.path.join(dump_data_dir, reload_script), 'w') as sqlfile:
        for df in data_file_names:
            abs_df = os.path.abspath(df)
            df_path, df_file = os.path.split(df)
            table_name, ext = df_file.split('.')
            if not dump_details['artifactsByTable'][table_name]['partial']:
                # not a partial dump for this table - truncate the table first
                sqlfile.write('TRUNCATE TABLE {};\n'.format(table_name))
            sqlfile.write("COPY {} FROM '{}';\n".format(table_name, abs_df))

    click.echo('Done.')
| {
"content_hash": "08947f460bb90ae1d76f36e478614b0b",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 153,
"avg_line_length": 39.519417475728154,
"alnum_prop": 0.6287925316300209,
"repo_name": "Harvard-University-iCommons/canvas-data-sdk",
"id": "ed96bda5507521b7725b26594588c444f28373f8",
"size": "8141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canvas_data/scripts/canvasdata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30514"
}
],
"symlink_target": ""
} |
import mock
import copy
from cloudify.state import current_ctx
from cloudify.mocks import MockCloudifyContext
from cloudify.exceptions import NonRecoverableError
from cloudify_types.component.constants import CAPABILITIES
from ..operations import execute_workflow
from ..constants import SHARED_RESOURCE_TYPE
from .base_test_suite import TestSharedResourceBase, NODE_PROPS
class TestExecuteWorkflow(TestSharedResourceBase):
    """Unit tests for the SharedResource ``execute_workflow`` operation."""
    @staticmethod
    def get_mock_ctx(test_name,
                     mock_node_type,
                     retry_number=0,
                     node_props=NODE_PROPS):
        """Build a relationship-style mock ctx whose ``target`` node has the
        given type and properties.

        ``ctx.operation.retry`` is stubbed to return the sentinel string
        'RETRIED' so tests can assert that a retry was requested.
        """
        def mock_retry(_):
            return 'RETRIED'
        operation = {
            'retry_number': retry_number
        }
        target_node_ctx = MockCloudifyContext(
            node_id=test_name,
            node_name=test_name,
            node_type=mock_node_type,
            deployment_id=test_name,
            operation=operation,
            properties=node_props
        )
        ctx = MockCloudifyContext(
            target=target_node_ctx
        )
        # The operation context name is read by the code under test.
        ctx.operation._operation_context = {'name': 'some.test'}
        ctx.operation.retry = lambda msg: mock_retry(msg)
        return ctx
    def setUp(self):
        """Patch the rest-client pagination properties used by list calls."""
        super(TestExecuteWorkflow, self).setUp()
        self.total_patch = \
            mock.patch('cloudify_rest_client.responses.Pagination.total',
                       new_callable=mock.PropertyMock)
        self.total_patch = self.total_patch.start()
        self.total_patch.return_value = 1
        self.offset_patch = \
            mock.patch('cloudify_rest_client.responses.Pagination.offset',
                       new_callable=mock.PropertyMock)
        self.offset_patch = self.offset_patch.start()
        self.offset_patch.return_value = 1
    def tearDown(self):
        """Stop the pagination patches started in setUp."""
        self.offset_patch.stop()
        self.total_patch.stop()
        super(TestExecuteWorkflow, self).tearDown()
    def test_basic_run(self):
        """execute_workflow completes against a mocked manager and polling."""
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            self.cfy_mock_client.deployments.capabilities.get = \
                mock.MagicMock(return_value={'capabilities': {}})
            mock_client.return_value = self.cfy_mock_client
            poll_with_timeout_test = \
                'cloudify_types.component.polling.poll_with_timeout'
            with mock.patch(poll_with_timeout_test) as poll:
                poll.return_value = True
                execute_workflow('test',
                                 parameters={})
    def test_failed_run_on_non_shared_resource_node(self):
        """A node that is not a SharedResource raises NonRecoverableError."""
        self._ctx = self.get_mock_ctx('test', 'not_shared_resource')
        current_ctx.set(self._ctx)
        self.assertRaises(NonRecoverableError, execute_workflow,
                          'test',
                          parameters={})
    def test_failure_after_execution_failed(self):
        """An unsuccessful remote execution raises NonRecoverableError."""
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            mock_client.return_value = self.cfy_mock_client
            poll_with_timeout_test = \
                'cloudify_types.component.polling.poll_with_timeout'
            with mock.patch(poll_with_timeout_test) as poll:
                poll.return_value = True
                verify_execution_state_patch = (
                    'cloudify_types.shared_resource.'
                    'execute_shared_resource_workflow.verify_execution_state')
                with mock.patch(verify_execution_state_patch) as verify:
                    # Simulate the workflow execution ending unsuccessfully.
                    verify.return_value = False
                    self.assertRaises(NonRecoverableError, execute_workflow,
                                      'test',
                                      parameters={}
                                      )
    def test_retrying_after_waiting_all_executions_timed_out(self):
        """A timeout while waiting for other executions triggers a retry
        (the stubbed ctx.operation.retry returns the 'RETRIED' sentinel)."""
        with mock.patch('cloudify.manager.get_rest_client') as mock_client:
            mock_client.return_value = self.cfy_mock_client
            poll_with_timeout_test = (
                'cloudify_types.shared_resource.'
                'execute_shared_resource_workflow.poll_with_timeout')
            with mock.patch(poll_with_timeout_test) as poll:
                poll.return_value = False
                result = execute_workflow('test',
                                          parameters={})
                self.assertEqual(result, 'RETRIED')
    def test_cloudify_configuration_used(self):
        """A node-level ``client`` config is used to construct CloudifyClient
        and the deployment capabilities end up in the target instance's
        runtime properties."""
        shared_resources_with_client = copy.deepcopy(NODE_PROPS)
        shared_resources_with_client['client'] = {'test': 1}
        self._ctx = self.get_mock_ctx('test',
                                      SHARED_RESOURCE_TYPE,
                                      node_props=shared_resources_with_client)
        current_ctx.set(self._ctx)
        with mock.patch('cloudify_types.shared_resource.'
                        'execute_shared_resource_workflow.'
                        'CloudifyClient') as mock_client:
            self.cfy_mock_client.deployments.capabilities.get = \
                mock.MagicMock(return_value={
                    CAPABILITIES:
                        {'test': 1}
                })
            mock_client.return_value = self.cfy_mock_client
            poll_with_timeout_test = (
                'cloudify_types.shared_resource.'
                'execute_shared_resource_workflow.poll_with_timeout')
            with mock.patch(poll_with_timeout_test) as poll:
                poll.return_value = True
                verify_execution_state_patch = (
                    'cloudify_types.shared_resource.'
                    'execute_shared_resource_workflow.verify_execution_state')
                with mock.patch(verify_execution_state_patch) as verify:
                    verify.return_value = True
                    execute_workflow('test',
                                     parameters={})
                    self.assertEqual(mock_client.called, True)
                    self.assertEqual(
                        {'test': 1},
                        (self._ctx.target.instance.runtime_properties
                         [CAPABILITIES]))
| {
"content_hash": "e40d1d8652e55d44890d5fc5b1e1f44e",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 78,
"avg_line_length": 41.986301369863014,
"alnum_prop": 0.5593800978792822,
"repo_name": "cloudify-cosmo/cloudify-manager",
"id": "9d06b4635604394e67e52d30d4ac7041fa132d2c",
"size": "6744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudify_types/cloudify_types/shared_resource/tests/test_execute_workflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Dockerfile",
"bytes": "3843"
},
{
"name": "HTML",
"bytes": "320"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "119062"
},
{
"name": "Python",
"bytes": "3825971"
},
{
"name": "Shell",
"bytes": "49121"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
from microproxy.version import VERSION
import os
here = os.path.abspath(os.path.dirname(__file__))

# Long description for PyPI is taken verbatim from the README.
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Dependency lists: one requirement per non-empty line of each file.
with open(os.path.join(here, './requirements/proxy.txt')) as f:
    proxy_deps = [dep for dep in f.read().split("\n") if dep]

with open(os.path.join(here, './requirements/viewer.txt')) as f:
    viewer_deps = [dep for dep in f.read().split("\n") if dep]

with open(os.path.join(here, './requirements/development.txt')) as f:
    # Skip "-r <file>" include directives.  Use startswith() rather than the
    # previous substring test ('"-r" not in dep'), which also dropped any
    # requirement whose *name* contains "-r" (e.g. "pytest-runner").
    dev_deps = [dep for dep in f.read().split("\n")
                if dep and not dep.startswith("-r")]
setup(
    name="microProxy",
    version=VERSION,
    description="A http/https interceptor proxy written in python inspired by mitmproxy",
    long_description=long_description,
    url="https://github.com/mike820324/microProxy",
    author="MicroMike",
    author_email="mike820324@gmail.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Environment :: Console",
        "Environment :: Console :: Curses",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Security",
        "Topic :: Internet",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: Proxy Servers",
        "Topic :: Software Development :: Testing"
    ],
    packages=find_packages(include=[
        "microproxy", "microproxy.*",
    ]),
    include_package_data=True,
    # Console entry points: proxy server, curses TUI viewer, plain dump viewer.
    entry_points={
        'console_scripts': [
            "mpserver=microproxy.command_line:mpserver",
            "mptui=microproxy.command_line:mptui",
            "mpdump=microproxy.command_line:mpdump",
        ]
    },
    # Core dependencies are pinned inline; NOTE(review): proxy_deps is read
    # from requirements/proxy.txt above but never referenced here -- confirm
    # whether install_requires should use it instead of this literal list.
    install_requires=[
        "tornado==4.3",
        "pyzmq==15.4.0",
        "watchdog==0.8.3",
        "pyOpenSSL==16.0.0",
        "service-identity==16.0.0",
        "certifi==2016.8.8",
        "construct==2.8.8, < 2.9.0",
        "six==1.10.0",
        "h2==2.4.2",
        "h11==0.7.0",
        "socks5==0.2.1"
    ],
    # Optional dependency groups read from the requirements files above.
    extras_require={
        'viewer': viewer_deps,
        'develop': dev_deps
    }
)
| {
"content_hash": "96e15801f28e48425e191e9793646f4e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 89,
"avg_line_length": 33.21621621621622,
"alnum_prop": 0.5895036615134256,
"repo_name": "mike820324/microProxy",
"id": "0fd66c7693147c80afb1f660e8fd83688d251f65",
"size": "2458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "374"
},
{
"name": "Python",
"bytes": "361092"
},
{
"name": "Shell",
"bytes": "2705"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function, unicode_literals
from time import sleep, time
from threading import Lock, Thread
from ant.core.constants import MESSAGE_TX_SYNC, RESPONSE_NO_ERROR
from ant.core.message import Message, ChannelEventResponseMessage
from ant.core.exceptions import MessageError
from usb.core import USBError
def EventPump(evm):
    """Event-pump loop run in a background thread by ``EventMachine.start``.

    Repeatedly reads raw bytes from ``evm.driver``, reassembles them into
    ANT messages and dispatches each decoded message to every registered
    callback.  The loop exits once ``evm.running`` is cleared by
    ``EventMachine.stop``.
    """
    buffer_ = b''
    while True:
        # Check the stop flag under the lock so stop() cannot race with us.
        with evm.runningLock:
            if not evm.running:
                break
        try:
            buffer_ += evm.driver.read(20)
        except USBError as e:
            if e.errno in (60, 110):  # read timeout: just poll again
                continue
            else:
                raise
        messages = []
        while len(buffer_) > 0:
            try:
                msg = Message.decode(buffer_)
                messages.append(msg)
                buffer_ = buffer_[len(msg):]
            except MessageError as err:
                if err.internal is not Message.INCOMPLETE:
                    i, length = 1, len(buffer_)
                    # Corrupt data: resynchronize at the next SYNC byte.
                    # Slice (buffer_[i:i + 1]) instead of indexing so ord()
                    # works on Python 2 (1-char str) and Python 3 alike --
                    # on Python 3, buffer_[i] is already an int and the old
                    # ord(buffer_[i]) raised TypeError.
                    while i < length and ord(buffer_[i:i + 1]) != MESSAGE_TX_SYNC:
                        i += 1
                    buffer_ = buffer_[i:]
                else:
                    # Incomplete message: keep the bytes, read more first.
                    break
        with evm.evmCallbackLock:
            for message in messages:
                for callback in evm.callbacks:
                    try:
                        callback.process(message)
                    except Exception as err:  # pylint: disable=broad-except
                        # A misbehaving callback must not kill the pump.
                        print(err)
class EventCallback(object):
    """Interface for objects that receive decoded ANT messages."""
    def process(self, msg):
        """Handle a single decoded message; subclasses must override."""
        raise NotImplementedError()


class EventMachineCallback(EventCallback):
    """Callback that queues incoming messages and lets clients wait on them.

    Messages are kept in a bounded FIFO of at most ``MAX_QUEUE`` entries;
    the oldest entries are dropped when the bound is exceeded.
    """
    MAX_QUEUE = 25
    # Predicate deciding whether a queued message satisfies a waiter.  The
    # default matches nothing; subclasses override it.
    WAIT_UNTIL = staticmethod(lambda _, __: None)

    def __init__(self):
        self.messages = []
        self.lock = Lock()

    def process(self, msg):
        """Append *msg* to the queue, trimming it to MAX_QUEUE entries."""
        with self.lock:
            messages = self.messages
            messages.append(msg)
            MAX_QUEUE = self.MAX_QUEUE
            if len(messages) > MAX_QUEUE:
                # Trim in place rather than rebinding self.messages: a
                # concurrent waitFor() captured a reference to this list
                # before entering its polling loop, so replacing the list
                # object (the previous behaviour) left waiters scanning a
                # stale snapshot that no longer receives new messages.
                del messages[:-MAX_QUEUE]

    def waitFor(self, foo, timeout=10):  # pylint: disable=blacklisted-name
        """Block until a queued message satisfies ``WAIT_UNTIL(foo, msg)``.

        The matching message is removed from the queue and returned; a
        MessageError is raised after *timeout* seconds without a match.
        """
        messages = self.messages
        basetime = time()
        while time() - basetime < timeout:
            with self.lock:
                for emsg in messages:
                    if self.WAIT_UNTIL(foo, emsg):
                        messages.remove(emsg)
                        return emsg
            sleep(0.001)
        raise MessageError("%s: timeout" % str(foo), internal=foo)


class AckCallback(EventMachineCallback):
    """Queues channel-response messages so senders can wait for their ACK."""
    WAIT_UNTIL = staticmethod(lambda msg, emsg: msg.type == emsg.messageID)

    def process(self, msg):
        if isinstance(msg, ChannelEventResponseMessage) and \
           msg.messageID != 1:  # response message, not event
            super(AckCallback, self).process(msg)


class MsgCallback(EventMachineCallback):
    """Queues every message; waiters match on the message class."""
    WAIT_UNTIL = staticmethod(lambda class_, emsg: isinstance(emsg, class_))
class EventMachine(object):
    """Owns the ANT driver, the pump thread and the registered callbacks.

    ``start()`` opens the driver and launches :func:`EventPump` in a
    background thread; ``stop()`` signals the pump to exit, joins it and
    closes the driver.
    """
    def __init__(self, driver):
        self.driver = driver
        self.callbacks = set()
        self.eventPump = None
        self.running = False
        # Guards self.callbacks between registration and pump dispatch.
        self.evmCallbackLock = Lock()
        # Guards self.running between start()/stop() and the pump thread.
        self.runningLock = Lock()
        # Built-in callbacks: 'ack' collects channel responses, 'msg'
        # collects every message; both are registered immediately.
        self.ack = ack = AckCallback()
        self.msg = msg = MsgCallback()
        self.registerCallback(ack)
        self.registerCallback(msg)
    def registerCallback(self, callback):
        """Add *callback* to the set notified for every decoded message."""
        with self.evmCallbackLock:
            self.callbacks.add(callback)
    def removeCallback(self, callback):
        """Remove *callback* if registered; unknown callbacks are ignored."""
        with self.evmCallbackLock:
            try:
                self.callbacks.remove(callback)
            except KeyError:
                pass
    def writeMessage(self, msg):
        """Send *msg* through the driver; returns self for call chaining."""
        self.driver.write(msg)
        return self
    def waitForAck(self, msg):
        """Wait for the channel response matching *msg*; raise MessageError
        unless its code is RESPONSE_NO_ERROR."""
        response = self.ack.waitFor(msg).messageCode
        if response != RESPONSE_NO_ERROR:
            raise MessageError("bad response code (%.2x)" % response,
                               internal=(msg, response))
    def waitForMessage(self, class_):
        """Wait for (and consume) the next message of type *class_*."""
        return self.msg.waitFor(class_)
    def start(self, name=None, driver=None):
        """Open the driver (optionally replacing it) and start the pump
        thread; a no-op when already running."""
        with self.runningLock:
            if self.running:
                return
            self.running = True
            if driver is not None:
                self.driver = driver
            self.driver.open()
            evPump = self.eventPump = Thread(name=name, target=EventPump, args=(self,))
            evPump.start()
    def stop(self):
        """Signal the pump to exit, join it and close the driver; a no-op
        when not running.  The join happens outside runningLock so the pump
        can acquire the lock for its final running-flag check."""
        with self.runningLock:
            if not self.running:
                return
            self.running = False
        self.eventPump.join()
        self.driver.close()
| {
"content_hash": "8aaf960ef019268555c7007732fa8d59",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 87,
"avg_line_length": 31.05095541401274,
"alnum_prop": 0.5423589743589744,
"repo_name": "SamyCookie/python-ant",
"id": "a8a0efe36deff14da758aed72676b80df7888952",
"size": "6337",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/ant/core/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99487"
},
{
"name": "Shell",
"bytes": "799"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
PACKAGES = find_packages()
# Version and metadata live in keratin/version.py (layout follows the
# shablona project template).  exec'ing the file injects NAME, VERSION,
# DESCRIPTION, etc. into this module's namespace for the setup() call below.
ver_file = os.path.join('keratin', 'version.py')
with open(ver_file) as f:
    exec(f.read())
# Runtime dependencies: one requirement per non-empty line of
# requirements.txt.  Iterating the file and stripping each line replaces the
# old readline()[:-1] loop, which chopped the last character of an
# unterminated final line and stopped reading at the first blank line.
with open('requirements.txt') as f:
    REQUIRES = [line.strip() for line in f if line.strip()]
# All metadata names (NAME, MAINTAINER, ...) were injected into this
# module's namespace by exec'ing keratin/version.py above.
opts = dict(name=NAME,
            maintainer=MAINTAINER,
            maintainer_email=MAINTAINER_EMAIL,
            description=DESCRIPTION,
            long_description=LONG_DESCRIPTION,
            url=URL,
            download_url=DOWNLOAD_URL,
            license=LICENSE,
            classifiers=CLASSIFIERS,
            author=AUTHOR,
            author_email=AUTHOR_EMAIL,
            platforms=PLATFORMS,
            version=VERSION,
            packages=PACKAGES,
            package_data=PACKAGE_DATA,
            install_requires=REQUIRES,
            # NOTE(review): 'requires' is the legacy distutils keyword and is
            # ignored by modern setuptools; install_requires above is the one
            # that matters.
            requires=REQUIRES)
if __name__ == '__main__':
    setup(**opts)
| {
"content_hash": "a1fcecd128d5c793c581cf46987510ed",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 74,
"avg_line_length": 27.42105263157895,
"alnum_prop": 0.5921305182341651,
"repo_name": "uw-biomedical-ml/keratin",
"id": "8d6b1f0dd19af64cbf36722fa421f8afe9b58c52",
"size": "1042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "378"
},
{
"name": "Python",
"bytes": "17204"
}
],
"symlink_target": ""
} |
"""
This script is a trick to set up a fake Django environment, since this reusable
app will be developed and tested outside any specific Django project.
Via ``settings.configure`` you will be able to set all necessary settings
for your app and run the tests as if you were calling ``./manage.py test``.
"""
import re
import sys
from django.conf import settings
import coverage
from fabric.api import abort, lcd, local
from fabric.colors import green, red
import holidays.settings.test_settings as test_settings
if not settings.configured:
settings.configure(**test_settings.__dict__)
from django_coverage.coverage_runner import CoverageRunner
from django_nose import NoseTestSuiteRunner
class NoseCoverageTestRunner(CoverageRunner, NoseTestSuiteRunner):
    """Custom test runner that uses nose and coverage"""
    def run_tests(self, *args, **kwargs):
        """Run the suite via the django-nose/django-coverage MRO chain, then
        persist the collected coverage data to .coverage so the HTML report
        can be inspected below."""
        results = super(NoseCoverageTestRunner, self).run_tests(
            *args, **kwargs)
        # NOTE(review): coverage._the_coverage is a private API of the old
        # coverage.py 3.x series -- verify against the pinned version.
        coverage._the_coverage.data.write_file('.coverage')
        return results
def runtests(*test_args):
    """Run the test suite, scrape the total coverage percentage out of the
    generated HTML report, print it, and exit with the failure count."""
    failures = NoseCoverageTestRunner(verbosity=2, interactive=True).run_tests(
        test_args)
    with lcd(settings.COVERAGE_REPORT_HTML_OUTPUT_DIR):
        # Find the line number of the "Total" row in index.html.
        total_line = local('grep -n Total index.html', capture=True)
        match = re.search(r'^(\d+):', total_line)
        total_line_number = int(match.groups()[0])
        # The percentage cell sits four lines below the "Total" row in the
        # generated markup (fragile: depends on the report template).
        percentage_line_number = total_line_number + 4
        percentage_line = local(
            'awk NR=={0} index.html'.format(percentage_line_number),
            capture=True)
        match = re.search(r'<td>(\d.+)%</td>', percentage_line)
        percentage = float(match.groups()[0])
        if percentage < 100:
            # Failing the build on <100% coverage is intentionally disabled.
            # abort(red('Coverage is {0}%'.format(percentage)))
            pass
        print(green('Coverage is {0}%'.format(percentage)))
    sys.exit(failures)
if __name__ == '__main__':
    # Forward any command-line arguments (test labels) to the runner.
    runtests(*sys.argv[1:])
| {
"content_hash": "77ad249f10186ae545145146b126efa3",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 31.59016393442623,
"alnum_prop": 0.6808510638297872,
"repo_name": "Valuehorizon/valuehorizon-holidays",
"id": "dfd14a1c9285ab73c6cf1f05c14ff26eb7826631",
"size": "1949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holidays/tests/runtests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "309"
},
{
"name": "Python",
"bytes": "7118"
}
],
"symlink_target": ""
} |
import sys
import os
from os.path import join
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Locate the project root by walking up to three directory levels and looking
# for VERSION.txt (the docs may be built from the repo root, doc/ or
# doc/source/).  The previous test wrapped the membership check in
# any([...]) over a one-element list, which was a no-op.
for idx in range(3):
    p = join('.', *(['..'] * idx))
    if 'VERSION.txt' in os.listdir(p):
        print('VERSION.txt found in {}'.format(p))
        break
#sys.path.insert(0, os.path.abspath(p))

with open(join(os.path.abspath(p), 'VERSION.txt'), 'r') as version_file:
    __release__ = version_file.read().strip()
# Short X.Y version: the full X.Y.Z release string minus its last component.
__version__ = __release__.rsplit('.', 1)[0]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]
# Napoleon settings: parse NumPy-style docstrings (Google style disabled).
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Concatenate the class docstring and the __init__ docstring.
autoclass_content = 'both'
todo_include_todos = False
todo_link_only = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyOTIC'
copyright = u'2016, Tobias Jachowski and Steve Simmert'
author = 'Tobias Jachowski and Steve Simmert'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.  Both values are derived from VERSION.txt above.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __release__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../logo.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyOTICdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'PyOTIC.tex', u'PyOTIC Documentation',
     u'Tobias Jachowski and Steve Simmert', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# NOTE(review): LaTeX cannot embed SVG images directly -- confirm the build
# converts ../logo.svg or supply a PDF/PNG fallback here.
latex_logo = '../logo.svg'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pyotic', u'PyOTIC Documentation',
     [u'Tobias Jachowski and Steve Simmert'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description string below ships the typos "denstity",
# "efficiantly" and "time-dependend" verbatim into the Texinfo output.
texinfo_documents = [
    ('index', 'PyOTIC', u'PyOTIC Documentation',
     u'Tobias Jachowski and Steve Simmert', 'PyOTIC',
     'Calibrate optical tweezers by power spectral denstity'
     'analysis and efficiantly analyze time-dependend signals.',
     'Scientific/Engineering'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/3.4': None}
#intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None)}
| {
"content_hash": "16b6709ae99ba6f6f4707f1363d1db68",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 79,
"avg_line_length": 31.756944444444443,
"alnum_prop": 0.7052263284495954,
"repo_name": "cellular-nanoscience/pyotic",
"id": "7783ea3c4fc1340905e1f798b491cd9dec30dbe7",
"size": "9564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "852640"
}
],
"symlink_target": ""
} |
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import ldap
from django_auth_ldap.config import LDAPSearch
from .common import * # noqa
from core.swiftmanager import SwiftManager
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# (Hard-coded here on purpose: this module is the local/dev settings file.)
SECRET_KEY = 'w1kxu^l=@pnsf!5piqz6!!5kdcdpo79y6jebbp+2244yjm*#+k'
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/4.0/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# LOGGING CONFIGURATION
# See https://docs.djangoproject.com/en/4.0/topics/logging/ for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '[%(asctime)s] [%(levelname)s]'
                      '[%(name)s][%(filename)s:%(lineno)d %(funcName)s] %(message)s'
        },
        'simple': {
            'format': '[%(asctime)s] [%(levelname)s]'
                      '[%(module)s %(process)d %(thread)d] %(message)s'
        },
    },
    'handlers': {
        'console_verbose': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
        'console_simple': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': '/tmp/debug.log',
            'formatter': 'simple'
        }
    },
    'loggers': {
        '': {  # root logger
            'level': 'INFO',
            'handlers': ['console_simple'],
        },
    }
}
# Give every project app a DEBUG-level logger writing to both the verbose
# console handler and /tmp/debug.log.
for app in ['collectionjson', 'core', 'feeds', 'plugins', 'plugininstances', 'pipelines',
            'pipelineinstances', 'uploadedfiles', 'pacsfiles', 'servicefiles', 'users',
            'filebrowser', 'workflows']:
    LOGGING['loggers'][app] = {
        'level': 'DEBUG',
        'handlers': ['console_verbose', 'file'],
        'propagate': False  # required to avoid double logging with root logger
    }
# Swift service settings (local docker-compose Swift instance; the container
# is created eagerly at settings-import time so misconfiguration fails fast).
DEFAULT_FILE_STORAGE = 'swift.storage.SwiftStorage'
SWIFT_AUTH_URL = 'http://swift_service:8080/auth/v1.0'
SWIFT_USERNAME = 'chris:chris1234'
SWIFT_KEY = 'testing'
SWIFT_CONTAINER_NAME = 'users'
SWIFT_CONNECTION_PARAMS = {'user': SWIFT_USERNAME,
                           'key': SWIFT_KEY,
                           'authurl': SWIFT_AUTH_URL}
try:
    SwiftManager(SWIFT_CONTAINER_NAME, SWIFT_CONNECTION_PARAMS).create_container()
except Exception as e:
    # Surface any storage-setup failure as a Django configuration error.
    raise ImproperlyConfigured(str(e))
# ChRIS store settings
CHRIS_STORE_URL = 'http://chris-store.local:8010/api/v1/'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES['default']['NAME'] = 'chris_dev'
DATABASES['default']['USER'] = 'chris'
DATABASES['default']['PASSWORD'] = 'Chris1234'
DATABASES['default']['TEST'] = {'NAME': 'test_chris_dev'}
DATABASES['default']['HOST'] = 'chris_dev_db'
DATABASES['default']['PORT'] = '5432'
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
INSTALLED_APPS += ['debug_toolbar']
INTERNAL_IPS = ['127.0.0.1',]
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions']
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
COMPUTE_RESOURCE_URL = 'http://pfcon.remote:30005/api/v1/'
# corsheaders
# ------------------------------------------------------------------------------
CORS_ALLOW_ALL_ORIGINS = True
CORS_EXPOSE_HEADERS = ['Allow', 'Content-Type', 'Content-Length']
# Celery settings
#CELERY_BROKER_URL = 'amqp://guest:guest@localhost'
CELERY_BROKER_URL = 'amqp://queue:5672'
#: Only add pickle to this list if your broker is secured
#: from unwanted access (see userguide/security.html)
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
# Worker settings
# messages to prefetch at a time multiplied by the number of concurrent processes
# default is 4 (four messages for each process)
CELERYD_PREFETCH_MULTIPLIER = 2
# LDAP auth configuration (disabled by default; flip AUTH_LDAP to True and
# the LDAP backend is put in front of the standard model backend).
AUTH_LDAP = False
if AUTH_LDAP:
    AUTH_LDAP_SERVER_URI = 'ldap://192.168.0.29:389'
    AUTH_LDAP_BIND_DN = 'cn=admin,dc=fnndsc,dc=org'
    AUTH_LDAP_BIND_PASSWORD = 'admin1234'
    AUTH_LDAP_USER_SEARCH_ROOT = 'dc=fnndsc,dc=org'
    AUTH_LDAP_USER_SEARCH = LDAPSearch(AUTH_LDAP_USER_SEARCH_ROOT, ldap.SCOPE_SUBTREE,
                                       '(uid=%(user)s)')
    AUTH_LDAP_USER_ATTR_MAP = {
        'first_name': 'givenName',
        'last_name': 'sn',
        'email': 'mail'
    }
    AUTHENTICATION_BACKENDS = (
        'django_auth_ldap.backend.LDAPBackend',
        'django.contrib.auth.backends.ModelBackend',
    )
| {
"content_hash": "b2ea934ffeba6af990354dee39de3f46",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 89,
"avg_line_length": 32.4804469273743,
"alnum_prop": 0.5933952528379773,
"repo_name": "FNNDSC/ChRIS_ultron_backEnd",
"id": "1b291e277b67c3cafd630f9ec03c1a19e076c0b6",
"size": "5838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chris_backend/config/settings/local.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3051"
},
{
"name": "HTML",
"bytes": "2839"
},
{
"name": "JavaScript",
"bytes": "262"
},
{
"name": "Python",
"bytes": "978019"
},
{
"name": "Shell",
"bytes": "74679"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 INFN (Italy)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'marco'
class ActorException(Exception):
    """Base class for actor-related errors.

    The message is forwarded to Exception.__init__ so that str(exc),
    repr(exc) and exception logging show it (previously it was only stored
    on the instance and str(exc) was empty).  It is also kept as the
    ``message`` attribute for existing callers.
    """
    def __init__(self, message):
        super(ActorException, self).__init__(message)
        self.message = message


class TransactionAlreadyExist(ActorException):
    """The transaction already exists."""


class TransactionDoesNotExist(ActorException):
    """The transaction does not exist."""


class OperationInvalidInThisState(ActorException):
    """The operation is invalid in the current state."""


class TransactionNotInRightState(ActorException):
    """The transaction is not in the right state."""
| {
"content_hash": "c44f3e26a5732fcbc5fe172a9d60c438",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 74,
"avg_line_length": 30.5,
"alnum_prop": 0.7693989071038252,
"repo_name": "INFN-Catania/FedManager",
"id": "8286d49e5c4a45b384237fce0b4cfa757abccb19",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actors/error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34614"
},
{
"name": "Web Ontology Language",
"bytes": "7325"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
# Route table for the course app.
# NOTE(review): views are referenced by dotted-path strings — the
# pre-Django-1.10 style; newer Django versions require view callables here.
urlpatterns = [
    url(r'^$', 'course.views.all_courses', name='courses'),
    url(r'^search/$', 'course.views.search'),
    url(r'^(?P<course_id>\d+)/$', 'course.views.course', name='course_object'),
    url(r'^subscribe/$', 'course.views.subscribe', name='subscribe'),
    # Sub-apps mounted under a course id, each with its own URL namespace
    # so templates can reverse e.g. "materials:<name>".
    url(r'^(?P<course_id>\d+)/materials/', include('materials.urls', namespace='materials')),
    url(r'^(?P<course_id>\d+)/analytics/', include('analytics.urls', namespace='analytics')),
    url(r'^(?P<course_id>\d+)/forum/', include('forum.urls', namespace='forum')),
]
"content_hash": "7b2d6313a3aca52dedc83bfdc3f09a64",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 93,
"avg_line_length": 48.916666666666664,
"alnum_prop": 0.6337308347529813,
"repo_name": "starkdee/courseware",
"id": "b67b63236d11d7fde1fd1a12cc9e9e6fd2f15175",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3670"
},
{
"name": "HTML",
"bytes": "24011"
},
{
"name": "JavaScript",
"bytes": "3653"
},
{
"name": "Python",
"bytes": "61761"
},
{
"name": "Ruby",
"bytes": "977"
}
],
"symlink_target": ""
} |
"""
__author__ = 'duzhipeng'
__mtime__ = '16/8/11'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
from app import app, config
from . import front
from flask import render_template, request, send_from_directory, url_for
from ..models import Config
import json
import time
from werkzeug.utils import secure_filename
import os
@front.route('/commit/success')
def commit_success():
    """Render the commit-success page with the configured site title and subtitle."""
    title_row = Config.query.filter_by(key='title').first()
    subtitle_row = Config.query.filter_by(key='subtitle').first()
    if title_row:
        web_title = title_row.value
    else:
        web_title = ''
    if subtitle_row:
        web_subtitle = subtitle_row.value
    else:
        web_subtitle = ''
    return render_template('front/success.html',
                           web_title=web_title,
                           web_subtitle=web_subtitle)
def allowed_file(filename):
    """Return True when *filename* has an extension listed in config.ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in config.ALLOWED_EXTENSIONS
# File download endpoint: serves a previously uploaded file by name.
@front.route('/uploads/<filename>')
def uploaded_file(filename):
    """Send *filename* from the configured upload folder."""
    upload_folder = app.config['UPLOAD_FOLDER']
    return send_from_directory(upload_folder, filename)
# File upload endpoint.
@front.route('/upload', methods=['POST'])
def upload():
    """Handle a POST upload of the 'detail_img' file field.

    Saves the file into UPLOAD_FOLDER under a timestamp-prefixed,
    sanitized name and returns a JSON payload with the download URL.

    Bug fix: previously, when the field was missing or the extension was
    not allowed, ``filename`` was unbound and building ``return_info``
    raised NameError. Those paths now return ``{"success": "false"}``.
    Also removes a stray debug ``print()``.
    """
    upload_file = request.files.get('detail_img')
    if not (upload_file and allowed_file(upload_file.filename)):
        return json.dumps({"success": "false", "file_path": ""})
    filename = str(time.time()) + secure_filename(upload_file.filename)
    save_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    upload_file.save(save_path)
    return_info = {"success": "true",
                   "file_path": url_for('.uploaded_file', filename=filename)}
    return json.dumps(return_info)
@front.route('/robots.txt')
def static_from_root():
    """Serve /robots.txt directly out of the application's static folder."""
    requested = request.path[1:]
    return send_from_directory(app.static_folder, requested)
@front.route('/favicon.ico')
def static_from_favicon():
    """Serve /favicon.ico directly out of the application's static folder."""
    requested = request.path[1:]
    return send_from_directory(app.static_folder, requested)
"content_hash": "45b08dc466d1e5ff6bfc027369da37c4",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 96,
"avg_line_length": 29.943661971830984,
"alnum_prop": 0.5804327375352775,
"repo_name": "duzhipeng/Ddesk",
"id": "ef689f867a86091bcec0f06a8fdf76532c2382e4",
"size": "2314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/front/other.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2435"
},
{
"name": "HTML",
"bytes": "133010"
},
{
"name": "JavaScript",
"bytes": "2298"
},
{
"name": "Python",
"bytes": "75527"
}
],
"symlink_target": ""
} |
from chai import Chai
from markhov_chain import MarkhovChain
class ApiTests(Chai):
    """Tests for MarkhovChain's transition add/remove bookkeeping.

    NOTE(review): the bare ``assertEqual(...)`` calls (no ``self.``) rely
    on Chai injecting unittest's assertion methods into the test scope —
    confirm against the Chai version in use.
    """
    def setUp(self):
        # Fresh chain per test so transition counts do not leak between tests.
        super(ApiTests, self).setUp()
        self.mc = MarkhovChain()
    def test_add_transition(self):
        """Repeated add_transition calls accumulate per-edge counts."""
        self.mc.add_transition("A", "B")
        self.mc.add_transition("A", "B")
        self.mc.add_transition("A", "B")
        self.mc.add_transition("B", "A")
        self.mc.add_transition("B", "C")
        self.mc.add_transition("C", "A")
        expected = {
            'A': { 'B': 3},
            'B': { 'A': 1, 'C': 1 },
            'C': { 'A': 1 }
        }
        assertEqual(self.mc._transition_dump(), expected)
    def test_remove_transition_singles(self):
        """Removing decrements counts; edges that reach zero disappear.

        NOTE(review): ("B", "A") is removed twice but was only added once —
        presumably remove_transition tolerates absent transitions; verify.
        """
        self.mc.add_transition("A", "B")
        self.mc.add_transition("A", "B")
        self.mc.add_transition("A", "B")
        self.mc.add_transition("B", "A")
        self.mc.add_transition("B", "C")
        self.mc.add_transition("C", "A")
        self.mc.remove_transition("A", "B")
        self.mc.remove_transition("B", "A")
        self.mc.remove_transition("B", "A")
        self.mc.remove_transition("C", "A")
        expected = {
            'A': { 'B': 2 },
            'B': { 'C': 1 }
        }
        assertEqual(self.mc._transition_dump(), expected)
    def test_remove_transition_all(self):
        """remove_transition(all=True) drops an edge regardless of its count."""
        self.mc.add_transition("A", "B")
        self.mc.add_transition("A", "B")
        self.mc.add_transition("A", "B")
        self.mc.add_transition("B", "A")
        self.mc.add_transition("B", "C")
        self.mc.add_transition("B", "C")
        self.mc.add_transition("B", "C")
        self.mc.add_transition("C", "A")
        self.mc.remove_transition("A", "B", all=True)
        self.mc.remove_transition("B", "A", all=True)
        self.mc.remove_transition("B", "A", all=True)
        self.mc.remove_transition("B", "C", all=True)
        self.mc.remove_transition("B", "C", all=True)
        expected = {
            'C': { 'A': 1 }
        }
        assertEqual(self.mc._transition_dump(), expected)
| {
"content_hash": "b50edc472d3d89c55ee45090fc1d7c19",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 51,
"avg_line_length": 22.153846153846153,
"alnum_prop": 0.6024305555555556,
"repo_name": "ownaginatious/markhov-chain",
"id": "1bf46aaa6ab9e14cc4a7054440dee43e7bc7fa71",
"size": "1728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/api_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6431"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import math
from rpython.rlib import rfloat
from topaz.module import ModuleDef, ClassDef
from topaz.objects.exceptionobject import W_StandardError, new_exception_allocate
class Math(object):
    """Ruby's ``Math`` module implemented for the topaz interpreter.

    Thin wrappers around CPython's ``math`` and RPython's ``rfloat``,
    converting results into topaz float objects and mapping Python's
    ValueError/OverflowError onto Ruby's Math::DomainError or signed
    infinities where Ruby semantics require it.

    NOTE(review): this is RPython (translated) code — only documentation
    is added here; the code is left byte-identical.
    """
    moduledef = ModuleDef("Math")

    @moduledef.setup_module
    def setup_module(space, w_mod):
        # Expose the PI and E constants plus the Math::DomainError class.
        space.set_const(w_mod, "PI", space.newfloat(math.pi))
        space.set_const(w_mod, "E", space.newfloat(math.e))
        space.set_const(w_mod, "DomainError", space.getclassfor(W_DomainError))

    @moduledef.function("acos", value="float")
    def method_acos(self, space, value):
        return space.newfloat(math.acos(value))

    @moduledef.function("acosh", value="float")
    def method_acosh(self, space, value):
        # Inputs < 1 make math.acosh raise ValueError -> Ruby DomainError.
        try:
            res = math.acosh(value)
        except ValueError:
            raise space.error(space.getclassfor(W_DomainError), 'Numerical argument is out of domain - "acosh"')
        return space.newfloat(res)

    @moduledef.function("asin", value="float")
    def method_asin(self, space, value):
        return space.newfloat(math.asin(value))

    @moduledef.function("asinh", value="float")
    def method_asinh(self, space, value):
        return space.newfloat(math.asinh(value))

    @moduledef.function("atan", value="float")
    def method_atan(self, space, value):
        return space.newfloat(math.atan(value))

    @moduledef.function("atan2", value1="float", value2="float")
    def method_atan2(self, space, value1, value2):
        return space.newfloat(math.atan2(value1, value2))

    @moduledef.function("atanh", value="float")
    def method_atanh(self, space, value):
        # atanh(+-1) is a pole: return a signed infinity instead of raising;
        # any other out-of-domain input becomes a Ruby DomainError.
        try:
            res = math.atanh(value)
        except ValueError:
            if value == 1.0 or value == -1.0:
                # produce an infinity with the right sign
                res = rfloat.copysign(rfloat.INFINITY, value)
            else:
                raise space.error(space.getclassfor(W_DomainError), 'Numerical argument is out of domain - "atanh"')
        return space.newfloat(res)

    @moduledef.function("cbrt", value="float")
    def method_cbrt(self, space, value):
        # math.pow rejects negative bases with fractional exponents, so the
        # cube root of a negative value is computed via its absolute value.
        if value < 0:
            return space.newfloat(-math.pow(-value, 1.0 / 3.0))
        else:
            return space.newfloat(math.pow(value, 1.0 / 3.0))

    @moduledef.function("cos", value="float")
    def method_cos(self, space, value):
        return space.newfloat(math.cos(value))

    @moduledef.function("cosh", value="float")
    def method_cosh(self, space, value):
        # Overflow for large |value| saturates to a signed infinity.
        try:
            res = math.cosh(value)
        except OverflowError:
            res = rfloat.copysign(rfloat.INFINITY, value)
        return space.newfloat(res)

    @moduledef.function("exp", value="float")
    def method_exp(self, space, value):
        return space.newfloat(math.exp(value))

    @moduledef.function("frexp", value="float")
    def method_frexp(self, space, value):
        # Returns [mantissa, exponent] as a Ruby array.
        mant, exp = math.frexp(value)
        w_mant = space.newfloat(mant)
        w_exp = space.newint(exp)
        return space.newarray([w_mant, w_exp])

    @moduledef.function("gamma", value="float")
    def method_gamma(self, space, value):
        # gamma(+-0.0) is a pole -> signed infinity; other domain errors
        # (negative integers) raise DomainError; overflow saturates to +inf.
        try:
            res = rfloat.gamma(value)
        except ValueError:
            if value == 0.0:
                # produce an infinity with the right sign
                res = rfloat.copysign(rfloat.INFINITY, value)
            else:
                raise space.error(space.getclassfor(W_DomainError), 'Numerical argument is out of domain - "gamma"')
        except OverflowError:
            res = rfloat.INFINITY
        return space.newfloat(res)

    @moduledef.function("lgamma", value="float")
    def method_lgamma(self, space, value):
        # Returns [log(|gamma(value)|), sign] where sign is the sign of
        # gamma(value) (0 when it is zero). The sign is recovered by
        # re-dispatching to Math.gamma, except for -1/NaN where it is 1.
        try:
            res = rfloat.lgamma(value)
        except (ValueError, OverflowError):
            res = rfloat.INFINITY
        gamma = (1 if value == -1 or math.isnan(value) else
                 space.float_w(space.send(self, "gamma", [space.newfloat(value)])))
        sign = 1 if gamma > 0 else -1 if gamma < 0 else 0
        return space.newarray([space.newfloat(res), space.newint(sign)])

    @moduledef.function("hypot", value1="float", value2="float")
    def method_hypot(self, space, value1, value2):
        return space.newfloat(math.hypot(value1, value2))

    @moduledef.function("ldexp", value1="float", value2="int")
    def method_ldexp(self, space, value1, value2):
        return space.newfloat(math.ldexp(value1, value2))

    @moduledef.function("log", value="float", base="float")
    def method_log(self, space, value, base=math.e):
        # log(0) is a pole -> -infinity; negative values -> DomainError.
        try:
            res = 0.0
            if base == math.e:
                res = math.log(value)
            else:
                res = math.log(value) / math.log(base)
        except ValueError:
            if value == 0.0:
                res = float(-rfloat.INFINITY)
            else:
                raise space.error(space.getclassfor(W_DomainError), 'Numerical argument is out of domain - "log"')
        return space.newfloat(res)

    @moduledef.function("log10", value="float")
    def method_log10(self, space, value):
        try:
            res = math.log10(value)
        except ValueError:
            if value == 0.0:
                res = float(-rfloat.INFINITY)
            else:
                raise space.error(space.getclassfor(W_DomainError), 'Numerical argument is out of domain - "log10"')
        return space.newfloat(res)

    @moduledef.function("log2", value="float")
    def method_log2(self, space, value):
        try:
            res = math.log(value) / math.log(2)
        except ValueError:
            if value == 0.0:
                res = float(-rfloat.INFINITY)
            else:
                raise space.error(space.getclassfor(W_DomainError), 'Numerical argument is out of domain - "log2"')
        return space.newfloat(res)

    @moduledef.function("sin", value="float")
    def method_sin(self, space, value):
        return space.newfloat(math.sin(value))

    @moduledef.function("sinh", value="float")
    def method_sinh(self, space, value):
        # Overflow for large |value| saturates to a signed infinity.
        try:
            res = math.sinh(value)
        except OverflowError:
            res = rfloat.copysign(rfloat.INFINITY, value)
        return space.newfloat(res)

    @moduledef.function("sqrt", value="float")
    def method_sqrt(self, space, value):
        return space.newfloat(math.sqrt(value))

    @moduledef.function("tan", value="float")
    def method_tan(self, space, value):
        # math.tan can raise ValueError (e.g. for infinities); Ruby returns NaN.
        try:
            res = math.tan(value)
        except ValueError:
            res = rfloat.NAN
        return space.newfloat(res)

    @moduledef.function("tanh", value="float")
    def method_tanh(self, space, value):
        return space.newfloat(math.tanh(value))

    @moduledef.function("erf", value="float")
    def method_erf(self, space, value):
        return space.newfloat(rfloat.erf(value))

    @moduledef.function("erfc", value="float")
    def method_erfc(self, space, value):
        return space.newfloat(rfloat.erfc(value))
# Ruby's Math::DomainError, raised by Math methods on out-of-domain input.
class W_DomainError(W_StandardError):
    classdef = ClassDef("Math::DomainError", W_StandardError.classdef)
    method_allocate = new_exception_allocate(classdef)
| {
"content_hash": "0503d846ba6c6a6b2d013b4584a94667",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 116,
"avg_line_length": 35.82178217821782,
"alnum_prop": 0.6104201216141515,
"repo_name": "kachick/topaz",
"id": "07eb60e1705b819d32aa53d17194d19e39d96ccb",
"size": "7236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topaz/modules/math.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1112159"
},
{
"name": "Ruby",
"bytes": "199941"
},
{
"name": "Shell",
"bytes": "7755"
}
],
"symlink_target": ""
} |
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import server_lib
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import compat
class IteratorTest(test.TestCase, parameterized.TestCase):
  def testNoGradients(self):
    """Iterator output is non-differentiable: gradients come back as None."""
    component = constant_op.constant([1.])
    side = constant_op.constant(0.)
    add = lambda x: x + side
    dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
    value = dataset.make_one_shot_iterator().get_next()
    self.assertIsNone(gradients_impl.gradients(value, component)[0])
    self.assertIsNone(gradients_impl.gradients(value, side)[0])
    self.assertIsNone(gradients_impl.gradients(value, [component, side])[0])
  def testCapturingStateInOneShotRaisesException(self):
    """make_one_shot_iterator() rejects datasets capturing stateful objects."""
    var = variables.Variable(37.0, name="myvar")
    dataset = (
        dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0])
        .map(lambda x: x + var))
    with self.assertRaisesRegexp(
        ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support "
        "datasets that capture stateful objects.+myvar"):
      dataset.make_one_shot_iterator()
  def testOneShotIterator(self):
    """One-shot iterator over a mapped, repeated tensor-slices dataset."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def _map_fn(x, y, z):
      # Squares each component elementwise.
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
        .repeat(14).make_one_shot_iterator())
    get_next = iterator.get_next()

    # Static shapes of the iterator's outputs match the sliced components.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.cached_session() as sess:
      # 14 repeats of 7 slices, then exhaustion.
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testOneShotIteratorCaptureByValue(self):
    """Same as testOneShotIterator, but components are captured as tensors."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    tensor_components = tuple([ops.convert_to_tensor(c) for c in components])

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = (
        dataset_ops.Dataset.from_tensor_slices(tensor_components)
        .map(_map_fn).repeat(14).make_one_shot_iterator())
    get_next = iterator.get_next()

    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.cached_session() as sess:
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testOneShotIteratorInsideContainer(self):
    """One-shot iterators in distinct resource containers do not share state."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def within_container():

      def _map_fn(x, y, z):
        return math_ops.square(x), math_ops.square(y), math_ops.square(z)

      iterator = (
          dataset_ops.Dataset.from_tensor_slices(components)
          .map(_map_fn).repeat(14).make_one_shot_iterator())
      return iterator.get_next()

    server = server_lib.Server.create_local_server()

    # Create two iterators within unique containers, and run them to
    # make sure that the resources aren't shared.
    #
    # The test below would fail if cname were the same across both
    # sessions.
    for j in range(2):
      with session.Session(server.target) as sess:
        cname = "iteration%d" % j
        with ops.container(cname):
          get_next = within_container()

        for _ in range(14):
          for i in range(7):
            result = sess.run(get_next)
            for component, result_component in zip(components, result):
              self.assertAllEqual(component[i]**2, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
  def testOneShotIteratorNonBlocking(self):
    """One-shot iterator initialization must not deadlock or block consumers."""
    dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

    # Create a session with a single thread to ensure that the
    # one-shot iterator initializer does not deadlock.
    config = config_pb2.ConfigProto(
        inter_op_parallelism_threads=1, use_per_session_threads=True)
    with session.Session(config=config) as sess:
      self.assertAllEqual([1, 4, 9], sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

    # Test with multiple threads invoking the one-shot iterator concurrently.
    with session.Session(config=config) as sess:
      results = []

      def consumer_thread():
        # Exactly one thread should win the single element; the rest see
        # OutOfRangeError and record None.
        try:
          results.append(sess.run(next_element))
        except errors.OutOfRangeError:
          results.append(None)

      num_threads = 8
      threads = [
          self.checkedThread(consumer_thread) for _ in range(num_threads)
      ]
      for t in threads:
        t.start()
      for t in threads:
        t.join()

      self.assertEqual(num_threads, len(results))
      self.assertEqual(num_threads - 1,
                       len([None for r in results if r is None]))
      self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
  def testOneShotIteratorInitializerFails(self):
    """A failing initializer surfaces its error on every subsequent use."""
    # Define a dataset whose initialization will always fail.
    dataset = dataset_ops.Dataset.from_tensors(
        array_ops.check_numerics(
            constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
        sess.run(next_element)

      # Test that subsequent attempts to use the iterator also fail.
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
        sess.run(next_element)

    with self.cached_session() as sess:

      def consumer_thread():
        with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
          sess.run(next_element)

      num_threads = 8
      threads = [
          self.checkedThread(consumer_thread) for _ in range(num_threads)
      ]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
  def testSimpleSharedResource(self):
    """A shared_name iterator resource is visible across sessions/graphs."""
    components = (np.array(1, dtype=np.int64),
                  np.array([1, 2, 3], dtype=np.int64),
                  np.array(37.0, dtype=np.float64))

    server = server_lib.Server.create_local_server()

    # Create two non-overlapping sessions that share the same iterator
    # resource on the same server, and verify that an action of the
    # first session (initializing the iterator) is visible in the
    # second session.
    with ops.Graph().as_default():
      iterator = (
          dataset_ops.Dataset.from_tensors(components)
          .map(lambda x, y, z: (x, y, z)).make_initializable_iterator(
              shared_name="shared_iterator"))
      init_op = iterator.initializer
      get_next = iterator.get_next()

      with session.Session(server.target) as sess:
        sess.run(init_op)
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)

        # Re-initialize the iterator in the first session.
        sess.run(init_op)

    with ops.Graph().as_default():
      # Re-define the iterator manually, without defining any of the
      # functions in this graph, to ensure that we are not
      # accidentally redefining functions with the same names in the
      # new graph.
      iterator = iterator_ops.Iterator.from_structure(
          shared_name="shared_iterator",
          output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
          output_shapes=([], [3], []))
      get_next = iterator.get_next()

      with session.Session(server.target) as sess:
        # Use the iterator without re-initializing in the second session.
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
  def testNotInitializedError(self):
    """Using an initializable iterator before initialization fails clearly."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    iterator = (
        dataset_ops.Dataset.from_tensors(components)
        .make_initializable_iterator())
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      with self.assertRaisesRegexp(errors.FailedPreconditionError,
                                   "iterator has not been initialized"):
        sess.run(get_next)
  def testReinitializableIterator(self):
    """from_structure iterator can be (re)initialized from different datasets."""
    dataset_3 = dataset_ops.Dataset.from_tensors(
        constant_op.constant([1, 2, 3]))
    dataset_4 = dataset_ops.Dataset.from_tensors(
        constant_op.constant([4, 5, 6, 7]))
    iterator = iterator_ops.Iterator.from_structure(dataset_3.output_types,
                                                    [None])

    dataset_3_init_op = iterator.make_initializer(dataset_3)
    dataset_4_init_op = iterator.make_initializer(dataset_4)
    get_next = iterator.get_next()

    self.assertEqual(dataset_3.output_types, iterator.output_types)
    self.assertEqual(dataset_4.output_types, iterator.output_types)
    self.assertEqual([None], iterator.output_shapes.as_list())

    with self.cached_session() as sess:
      # The iterator is initially uninitialized.
      with self.assertRaises(errors.FailedPreconditionError):
        sess.run(get_next)

      # Initialize with one dataset.
      sess.run(dataset_3_init_op)
      self.assertAllEqual([1, 2, 3], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Initialize with a different dataset.
      sess.run(dataset_4_init_op)
      self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Reinitialize with the first dataset.
      sess.run(dataset_3_init_op)
      self.assertAllEqual([1, 2, 3], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testReinitializableIteratorWithFunctions(self):
    """Reinitializable iterator works across generator-backed datasets."""

    def g():
      for i in range(10):
        yield i

    iterator = iterator_ops.Iterator.from_structure(dtypes.int64, [])
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      dataset_1 = dataset_ops.Dataset.from_generator(
          g, output_types=dtypes.int64)
      sess.run(iterator.make_initializer(dataset_1))
      for expected in range(10):
        self.assertEqual(expected, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

      dataset_2 = dataset_ops.Dataset.from_generator(
          g, output_types=dtypes.int64)
      sess.run(iterator.make_initializer(dataset_2))
      for expected in range(10):
        self.assertEqual(expected, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
  def testReinitializableIteratorStaticErrors(self):
    """Graph-construction-time validation of structure/type/shape mismatches."""
    # Non-matching structure for types and shapes.
    with self.assertRaises(TypeError):
      iterator = iterator_ops.Iterator.from_structure(
          (dtypes.int64, dtypes.float64), [None])

    # Test validation of dataset argument.
    iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
                                                     dtypes.float64))

    # Incompatible structure.
    with self.assertRaises(ValueError):
      iterator.make_initializer(
          dataset_ops.Dataset.from_tensors(((constant_op.constant(
              [1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
                  [4., 5., 6., 7.], dtype=dtypes.float64),))))

    # Incompatible types.
    with self.assertRaises(TypeError):
      iterator.make_initializer(
          dataset_ops.Dataset.from_tensors(
              (constant_op.constant([1, 2, 3], dtype=dtypes.int32),
               constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float32))))

    # Incompatible shapes.
    iterator = iterator_ops.Iterator.from_structure(
        (dtypes.int64, dtypes.float64), ([None], []))
    with self.assertRaises(TypeError):
      iterator.make_initializer(
          dataset_ops.Dataset.from_tensors(
              (constant_op.constant([1, 2, 3], dtype=dtypes.int64),
               constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float64))))
  def testIteratorStringHandle(self):
    """A feedable iterator switches between source iterators per string handle."""
    dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
    dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])

    iterator_3 = dataset_3.make_one_shot_iterator()
    iterator_4 = dataset_4.make_one_shot_iterator()

    handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
    feedable_iterator = iterator_ops.Iterator.from_string_handle(
        handle_placeholder, dataset_3.output_types, dataset_3.output_shapes)
    next_element = feedable_iterator.get_next()

    self.assertEqual(dataset_3.output_types, feedable_iterator.output_types)
    self.assertEqual(dataset_4.output_types, feedable_iterator.output_types)
    self.assertEqual([], feedable_iterator.output_shapes)

    with self.cached_session() as sess:
      iterator_3_handle = sess.run(iterator_3.string_handle())
      iterator_4_handle = sess.run(iterator_4.string_handle())

      # Interleave the two iterators; each keeps its own position.
      self.assertEqual(10,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_4_handle}))
      self.assertEqual(1,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_3_handle}))
      self.assertEqual(20,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_4_handle}))
      self.assertEqual(2,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_3_handle}))
      self.assertEqual(30,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_4_handle}))
      self.assertEqual(3,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_3_handle}))
      self.assertEqual(40,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_4_handle}))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(
            next_element, feed_dict={handle_placeholder: iterator_3_handle})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(
            next_element, feed_dict={handle_placeholder: iterator_4_handle})
  def testIteratorStringHandleFuture(self):
    """Same handle-switching behavior under a forward-compatibility horizon."""
    with forward_compat.forward_compatibility_horizon(2018, 8, 4):
      dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
      dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])

      iterator_3 = dataset_3.make_one_shot_iterator()
      iterator_4 = dataset_4.make_one_shot_iterator()

      handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
      feedable_iterator = iterator_ops.Iterator.from_string_handle(
          handle_placeholder, dataset_3.output_types, dataset_3.output_shapes)
      next_element = feedable_iterator.get_next()

      self.assertEqual(dataset_3.output_types, feedable_iterator.output_types)
      self.assertEqual(dataset_4.output_types, feedable_iterator.output_types)
      self.assertEqual([], feedable_iterator.output_shapes)

      with self.cached_session() as sess:
        iterator_3_handle = sess.run(iterator_3.string_handle())
        iterator_4_handle = sess.run(iterator_4.string_handle())

        self.assertEqual(
            10,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_4_handle}))
        self.assertEqual(
            1,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_3_handle}))
        self.assertEqual(
            20,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_4_handle}))
        self.assertEqual(
            2,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_3_handle}))
        self.assertEqual(
            30,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_4_handle}))
        self.assertEqual(
            3,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_3_handle}))
        self.assertEqual(
            40,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_4_handle}))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(
              next_element, feed_dict={handle_placeholder: iterator_3_handle})
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(
              next_element, feed_dict={handle_placeholder: iterator_4_handle})
  def testIteratorStringHandleReuseTensorObject(self):
    """Default string_handle() tensors are cached; named ones create new ops."""
    dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
    one_shot_iterator = dataset.make_one_shot_iterator()
    initializable_iterator = dataset.make_initializable_iterator()
    structure_iterator = iterator_ops.Iterator.from_structure(
        dataset.output_types)

    created_ops = len(ops.get_default_graph().get_operations())

    self.assertIs(one_shot_iterator.string_handle(),
                  one_shot_iterator.string_handle())
    self.assertIs(initializable_iterator.string_handle(),
                  initializable_iterator.string_handle())
    self.assertIs(structure_iterator.string_handle(),
                  structure_iterator.string_handle())

    # Assert that getting the (default) string handle creates no ops.
    self.assertEqual(created_ops, len(ops.get_default_graph().get_operations()))

    # Specifying an explicit name will create a new op.
    handle_with_name = one_shot_iterator.string_handle(name="foo")
    self.assertEqual("foo", handle_with_name.op.name)
    self.assertIsNot(one_shot_iterator.string_handle(), handle_with_name)

    handle_with_same_name = one_shot_iterator.string_handle(name="foo")
    self.assertEqual("foo_1", handle_with_same_name.op.name)
    self.assertIsNot(handle_with_name, handle_with_same_name)
  def testIteratorStringHandleError(self):
    """Feeding a handle whose element type/shape mismatches the iterator fails."""
    dataset_int_scalar = (
        dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat())
    dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]))

    handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])

    feedable_int_scalar = iterator_ops.Iterator.from_string_handle(
        handle_placeholder, dtypes.int32, [])
    feedable_int_vector = iterator_ops.Iterator.from_string_handle(
        handle_placeholder, dtypes.int32, [None])
    feedable_int_any = iterator_ops.Iterator.from_string_handle(
        handle_placeholder, dtypes.int32)

    with self.cached_session() as sess:
      handle_int_scalar = sess.run(
          dataset_int_scalar.make_one_shot_iterator().string_handle())
      handle_float_vector = sess.run(
          dataset_float_vector.make_one_shot_iterator().string_handle())

      self.assertEqual(1,
                       sess.run(
                           feedable_int_scalar.get_next(),
                           feed_dict={handle_placeholder: handle_int_scalar}))

      self.assertEqual(2,
                       sess.run(
                           feedable_int_any.get_next(),
                           feed_dict={handle_placeholder: handle_int_scalar}))

      with self.assertRaises(errors.InvalidArgumentError):
        print(sess.run(
            feedable_int_vector.get_next(),
            feed_dict={handle_placeholder: handle_int_scalar}))

      with self.assertRaises(errors.InvalidArgumentError):
        print(sess.run(
            feedable_int_vector.get_next(),
            feed_dict={handle_placeholder: handle_float_vector}))
  def testRemoteIteratorUsingRemoteCallOpDirectSession(self):
    """remote_call only reaches the iterator on the device that owns it."""
    worker_config = config_pb2.ConfigProto()
    worker_config.device_count["CPU"] = 3

    with ops.device("/job:localhost/replica:0/task:0/cpu:1"):
      dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
      iterator_3 = dataset_3.make_one_shot_iterator()
      iterator_3_handle = iterator_3.string_handle()

    @function.Defun(dtypes.string)
    def _remote_fn(h):
      # Runs on the remote target: reconstructs the iterator from its
      # string handle and pulls one element.
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          h, dataset_3.output_types, dataset_3.output_shapes)
      return remote_iterator.get_next()

    with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
      target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
      remote_op = functional_ops.remote_call(
          args=[iterator_3_handle],
          Tout=[dtypes.int32],
          f=_remote_fn,
          target=target_placeholder)

    with self.session(config=worker_config) as sess:
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
          })
      self.assertEqual(elem, [1])

      # Fails when target is cpu:2 where the resource is not located.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(
            remote_op,
            feed_dict={
                target_placeholder: "/job:localhost/replica:0/task:0/cpu:2"
            })

      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
          })
      self.assertEqual(elem, [2])

      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
          })
      self.assertEqual(elem, [3])

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(
            remote_op,
            feed_dict={
                target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
            })
def testRemoteIteratorUsingRemoteCallOpMultiWorkers(self):
  """Remote get_next() across a 2-worker + 1-client in-process cluster."""
  s1 = server_lib.Server.create_local_server()
  s2 = server_lib.Server.create_local_server()
  s3 = server_lib.Server.create_local_server()

  cluster_def = cluster_pb2.ClusterDef()
  workers = cluster_def.job.add()
  workers.name = "worker"
  workers.tasks[0] = s1.target[len("grpc://"):]
  workers.tasks[1] = s2.target[len("grpc://"):]
  client = cluster_def.job.add()
  client.name = "client"
  client.tasks[0] = s3.target[len("grpc://"):]
  config = config_pb2.ConfigProto(cluster_def=cluster_def)

  worker_devices = [
      "/job:worker/replica:0/task:%d/cpu:0" % i for i in range(2)
  ]
  # Place a single-element iterator on each worker; the element is the
  # worker's own device string, so results identify where data came from.
  itr_handles = []
  for device in worker_devices:
    with ops.device(device):
      src = dataset_ops.Dataset.from_tensor_slices([device])
      itr = src.make_one_shot_iterator()
      itr_handles.append(itr.string_handle())

  targets = dataset_ops.Dataset.from_tensor_slices(worker_devices)
  handles = dataset_ops.Dataset.from_tensor_slices(itr_handles)

  @function.Defun(dtypes.string)
  def loading_func(h):
    remote_itr = iterator_ops.Iterator.from_string_handle(
        h, itr.output_types, itr.output_shapes)
    return remote_itr.get_next()

  def map_fn(target, handle):
    return functional_ops.remote_call(
        args=[handle], Tout=[dtypes.string], f=loading_func, target=target)

  with ops.device("/job:client"):
    client_dataset = dataset_ops.Dataset.zip((targets, handles)).map(map_fn)
    itr = client_dataset.make_initializable_iterator()
    n = itr.get_next()

  with session.Session(s3.target, config=config) as sess:
    sess.run(itr.initializer)
    # Each element should be the device string of the worker that served it.
    for expected in worker_devices:
      self.assertEqual((compat.as_bytes(expected),), sess.run(n))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(n)
def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self):
  """Round-trips an iterator handle through uint8 so a GPU op can carry it."""
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
    dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
    iterator_3 = dataset_3.make_one_shot_iterator()
    iterator_3_handle = iterator_3.string_handle()

  def _encode_raw(byte_array):
    # Reassemble the raw uint8 values into the original handle bytes.
    return bytes(bytearray(byte_array))

  @function.Defun(dtypes.uint8)
  def _remote_fn(h):
    handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
    remote_iterator = iterator_ops.Iterator.from_string_handle(
        handle, dataset_3.output_types, dataset_3.output_shapes)
    return remote_iterator.get_next()

  with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
    target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
    # decode_raw turns the string handle into a uint8 tensor a GPU op can
    # hold; _remote_fn reverses the transformation.
    iterator_3_handle_uint8 = parsing_ops.decode_raw(
        bytes=iterator_3_handle, out_type=dtypes.uint8)
    remote_op = functional_ops.remote_call(
        args=[iterator_3_handle_uint8],
        Tout=[dtypes.int32],
        f=_remote_fn,
        target=target_placeholder)

  with self.cached_session() as sess:
    cpu_target = "/job:localhost/replica:0/task:0/cpu:0"
    for expected in ([1], [2], [3]):
      elem = sess.run(
          remote_op, feed_dict={target_placeholder: cpu_target})
      self.assertEqual(elem, expected)
    # The three-element dataset is now exhausted.
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(remote_op, feed_dict={target_placeholder: cpu_target})
def testIncorrectIteratorRestore(self):
  """Restoring saved state into an incompatible iterator type must fail."""

  def _path():
    return os.path.join(self.get_temp_dir(), "iterator")

  def _save_op(iterator_resource):
    # Serialize the iterator state and persist it to disk.
    state_variant = gen_dataset_ops.serialize_iterator(iterator_resource)
    return io_ops.write_file(
        _path(), parsing_ops.serialize_tensor(state_variant))

  def _restore_op(iterator_resource):
    # Read the serialized state back and push it into the resource.
    state_variant = parsing_ops.parse_tensor(
        io_ops.read_file(_path()), dtypes.variant)
    return gen_dataset_ops.deserialize_iterator(iterator_resource,
                                                state_variant)

  def _build_range_dataset_graph():
    start = 1
    stop = 10
    iterator = dataset_ops.Dataset.range(
        start, stop).make_initializable_iterator()
    return (iterator.initializer, iterator.get_next(),
            _save_op(iterator._iterator_resource),
            _restore_op(iterator._iterator_resource))

  def _build_reader_dataset_graph():
    filenames = ["test"]  # Does not exist but we don't care in this test.
    iterator = readers.FixedLengthRecordDataset(
        filenames, 1, 0, 0).make_initializable_iterator()
    return (iterator.initializer, iterator.get_next(),
            _save_op(iterator._iterator_resource),
            _restore_op(iterator._iterator_resource))

  # Saving iterator for RangeDataset graph.
  with ops.Graph().as_default() as g:
    init_op, _, save_op, _ = _build_range_dataset_graph()
    with self.session(graph=g) as sess:
      sess.run(init_op)
      sess.run(save_op)

  # Attempt to restore the saved iterator into an IteratorResource of
  # incompatible type. An iterator of RangeDataset has output type int64,
  # while an iterator of FixedLengthRecordDataset has output type string.
  # So an InvalidArgumentError should be raised by
  # IteratorResource::set_iterator.
  with ops.Graph().as_default() as g:
    _, _, _, restore_op = _build_reader_dataset_graph()
    with self.session(graph=g) as sess:
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(restore_op)
def testRepeatedGetNextWarning(self):
  """Calling get_next() repeatedly warns past the documented threshold.

  Every call beyond GET_NEXT_CALL_WARNING_THRESHOLD should emit one
  warning containing GET_NEXT_CALL_WARNING_MESSAGE.
  """
  iterator = dataset_ops.Dataset.range(10).make_one_shot_iterator()
  with warnings.catch_warnings(record=True) as w:
    # Set the filter *inside* catch_warnings so the change is undone on
    # exit. The original called simplefilter("always") before entering the
    # context, so the global filter mutation leaked into later tests.
    warnings.simplefilter("always")
    for _ in range(100):
      iterator.get_next()
  self.assertEqual(100 - iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD, len(w))
  for warning in w:
    self.assertIn(
        iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE, str(warning.message))
def testEagerIteratorAsync(self):
  """Eager iteration over a dataset works under ASYNC execution mode."""
  with context.eager_mode(), context.execution_mode(context.ASYNC):
    expected = 0
    for elem in dataset_ops.Dataset.range(10):
      self.assertEqual(expected, elem.numpy())
      expected += 1
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
    ("Tensor", lambda: constant_op.constant(37.0),
     structure.TensorStructure(dtypes.float32, []),
     ops.Tensor, dtypes.float32, []),
    ("SparseTensor", lambda: sparse_tensor.SparseTensor(
        indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32),
        dense_shape=[1]),
     structure.SparseTensorStructure(dtypes.int32, [1]),
     sparse_tensor.SparseTensor, dtypes.int32, [1]),
    ("Nest", lambda: {
        "a": constant_op.constant(37.0),
        "b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))},
     structure.NestedStructure({
         "a": structure.TensorStructure(dtypes.float32, []),
         "b": (structure.TensorStructure(dtypes.string, [1]),
               structure.TensorStructure(dtypes.string, []))}),
     {"a": ops.Tensor, "b": (ops.Tensor, ops.Tensor)},
     {"a": dtypes.float32, "b": (dtypes.string, dtypes.string)},
     {"a": [], "b": ([1], [])}),
)
def testIteratorStructure(self, tf_value_fn, expected_element_structure,
                          expected_output_classes, expected_output_types,
                          expected_output_shapes):
  """An iterator reports the structure of its dataset's elements."""
  iterator = dataset_ops.Dataset.from_tensors(
      tf_value_fn()).make_one_shot_iterator()
  # Structure compatibility must hold in both directions.
  self.assertTrue(expected_element_structure.is_compatible_with(
      iterator._element_structure))
  self.assertTrue(iterator._element_structure.is_compatible_with(
      expected_element_structure))
  # Legacy accessors agree with the expected structure.
  self.assertEqual(expected_output_classes, iterator.output_classes)
  self.assertEqual(expected_output_types, iterator.output_types)
  self.assertEqual(expected_output_shapes, iterator.output_shapes)
def testIteratorGetNextName(self):
  """get_next(name=...) names the resulting op."""
  with ops.Graph().as_default():
    one_shot = dataset_ops.Dataset.from_tensors(
        37.0).make_one_shot_iterator()
    next_element = one_shot.get_next(name="overridden_name")
    self.assertEqual("overridden_name", next_element.op.name)
class IteratorCheckpointingTest(test.TestCase):
  """Tests that iterator progress survives Checkpoint save/restore."""

  def _get_next_fn(self, iterator):
    """Returns a zero-arg callable producing the next element.

    In eager mode this is the iterator's own get_next; in graph mode the
    get_next op is built once and repeatedly evaluated.
    """
    if context.executing_eagerly():
      return iterator.get_next
    return functools.partial(self.evaluate, iterator.get_next())

  @test_util.run_in_graph_and_eager_modes
  def testSaveRestoreOneShotIterator(self):
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]).map(
        math_ops.square).batch(2)
    iterator = dataset.make_one_shot_iterator()
    get_next = self._get_next_fn(iterator)
    checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)
    self.assertAllEqual([1, 4], get_next())
    save_path = checkpoint.save(checkpoint_prefix)
    self.assertAllEqual([9, 16], get_next())
    self.assertAllEqual([25, 36], get_next())
    # Restore rewinds to just after the first batch; the remaining two
    # batches replay, then the iterator is exhausted.
    checkpoint.restore(save_path).run_restore_ops()
    self.assertAllEqual([9, 16], get_next())
    self.assertAllEqual([25, 36], get_next())
    with self.assertRaises(errors.OutOfRangeError):
      get_next()

  @test_util.run_in_graph_and_eager_modes
  def testSaveRestoreMultipleIterator(self):
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    dataset = dataset_ops.Dataset.from_tensor_slices(
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    dataset = dataset.map(math_ops.square).batch(2)
    iterator_1 = dataset.make_one_shot_iterator()
    get_next_1 = self._get_next_fn(iterator_1)
    iterator_2 = dataset.make_one_shot_iterator()
    get_next_2 = self._get_next_fn(iterator_2)
    dataset_2 = dataset_ops.Dataset.range(10)
    iterator_3 = dataset_2.make_one_shot_iterator()
    get_next_3 = self._get_next_fn(iterator_3)
    # A single checkpoint tracks all three iterators at once.
    checkpoint = checkpointable_utils.Checkpoint(
        iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)
    self.assertAllEqual([1, 4], get_next_1())
    self.assertAllEqual(0, get_next_3())
    self.assertAllEqual(1, get_next_3())
    self.assertAllEqual(2, get_next_3())
    save_path = checkpoint.save(checkpoint_prefix)
    self.assertAllEqual([1, 4], get_next_2())
    self.assertAllEqual([9, 16], get_next_2())
    self.assertAllEqual(3, get_next_3())
    checkpoint.restore(save_path).run_restore_ops()
    # Each iterator resumes from its own saved position.
    self.assertAllEqual([9, 16], get_next_1())
    self.assertAllEqual([1, 4], get_next_2())
    self.assertAllEqual(3, get_next_3())

  @test_util.run_in_graph_and_eager_modes
  def testRestoreExhaustedIterator(self):
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    dataset = dataset_ops.Dataset.range(3)
    iterator = dataset.make_one_shot_iterator()
    get_next = self._get_next_fn(iterator)
    checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)
    self.assertAllEqual(0, get_next())
    self.assertAllEqual(1, get_next())
    save_path = checkpoint.save(checkpoint_prefix)
    self.assertAllEqual(2, get_next())
    checkpoint.restore(save_path).run_restore_ops()
    self.assertAllEqual(2, get_next())
    # Saving after exhaustion: the restored iterator is exhausted too.
    save_path = checkpoint.save(checkpoint_prefix)
    checkpoint.restore(save_path).run_restore_ops()
    with self.assertRaises(errors.OutOfRangeError):
      get_next()

  def testRestoreInReconstructedIteratorInitializable(self):
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    dataset = dataset_ops.Dataset.range(10)
    iterator = dataset.make_initializable_iterator()
    get_next = iterator.get_next()
    checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)
    # Rebuild the session each round; progress must carry over purely
    # through the checkpoint files on disk.
    for i in range(5):
      with self.cached_session() as sess:
        checkpoint.restore(checkpoint_management.latest_checkpoint(
            checkpoint_directory)).initialize_or_restore(sess)
        for j in range(2):
          self.assertEqual(i * 2 + j, sess.run(get_next))
        checkpoint.save(file_prefix=checkpoint_prefix)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "f7a6a0f831b28638b4f484d1fe85039f",
"timestamp": "",
"source": "github",
"line_count": 943,
"max_line_length": 82,
"avg_line_length": 40.48356309650053,
"alnum_prop": 0.6451697401508801,
"repo_name": "brchiu/tensorflow",
"id": "490ca813dcee4476f7377df83f4a1400328451e4",
"size": "38865",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/kernel_tests/iterator_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "473950"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "51674376"
},
{
"name": "CMake",
"bytes": "199085"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285435"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "875500"
},
{
"name": "Jupyter Notebook",
"bytes": "2623054"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "63390"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41718475"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "490100"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import pickle
from hashlib import md5
from datetime import datetime
from elasticsearch_dsl import document, field, Mapping
from elasticsearch_dsl.exceptions import ValidationException
from pytest import raises
class MyInner(field.InnerObjectWrapper):
    """Marker subclass used to verify doc_class wiring on Object fields."""


class MyDoc(document.DocType):
    """Base document mixing analyzed and not-analyzed fields."""
    title = field.String(index='not_analyzed')
    name = field.String()
    created_at = field.Date()
    inner = field.Object(
        properties={'old_field': field.String()}, doc_class=MyInner)


class MySubDoc(MyDoc):
    """Subclass overriding a field and pinning doc_type/index via Meta."""
    name = field.String(index='not_analyzed')

    class Meta:
        doc_type = 'my_custom_doc'
        index = 'default-index'


class MyDoc2(document.DocType):
    extra = field.Long()


class MyMultiSubDoc(MyDoc2, MySubDoc):
    """Multiple inheritance across both document hierarchies."""


class DocWithNested(document.DocType):
    comments = field.Nested(properties={'title': field.String()})
def test_null_value_for_object():
    # Explicitly passing None must not be coerced into an empty inner object.
    doc = MyDoc(inner=None)
    assert doc.inner is None


def test_inherited_doc_types_can_override_index():
    class MyDocDifferentIndex(MySubDoc):
        class Meta:
            index = 'not-default-index'

    assert MyDocDifferentIndex._doc_type.index == 'not-default-index'
def test_to_dict_with_meta():
    doc = MySubDoc(title='hello')
    doc.meta.parent = 'some-parent'
    # to_dict(True) includes the metadata envelope around the source.
    expected = {
        '_index': 'default-index',
        '_parent': 'some-parent',
        '_type': 'my_custom_doc',
        '_source': {'title': 'hello'},
    }
    assert expected == doc.to_dict(True)


def test_to_dict_with_meta_includes_custom_index():
    doc = MySubDoc(title='hello')
    doc.meta.index = 'other-index'
    # A per-instance index set on meta overrides the class-level default.
    expected = {
        '_index': 'other-index',
        '_type': 'my_custom_doc',
        '_source': {'title': 'hello'},
    }
    assert expected == doc.to_dict(True)


def test_attribute_can_be_removed():
    doc = MyDoc(title='hello')
    del doc.title
    # Deletion removes the key from the backing document dict entirely.
    assert 'title' not in doc._d_
def test_doc_type_can_be_correctly_pickled():
    original = DocWithNested(
        title='Hello World!', comments=[{'title': 'hellp'}], meta={'id': 42})
    restored = pickle.loads(pickle.dumps(original))

    # Round-tripping preserves data and metadata alike.
    assert restored == original
    assert 42 == restored.meta.id
    assert 'Hello World!' == restored.title
    assert [{'title': 'hellp'}] == restored.comments


def test_meta_is_accessible_even_on_empty_doc():
    # Neither an empty nor a populated doc may raise on .meta access.
    MyDoc().meta
    MyDoc(title='aaa').meta
def test_meta_field_mapping():
    class User(document.DocType):
        username = field.String()

        class Meta:
            all = document.MetaField(enabled=False)
            _index = document.MetaField(enabled=True)
            dynamic = document.MetaField('strict')
            dynamic_templates = document.MetaField([42])

    # MetaField entries end up alongside properties in the mapping.
    expected = {
        'user': {
            'properties': {
                'username': {'type': 'string'}
            },
            '_all': {'enabled': False},
            '_index': {'enabled': True},
            'dynamic': 'strict',
            'dynamic_templates': [42]
        }
    }
    assert expected == User._doc_type.mapping.to_dict()


def test_multi_value_fields():
    class Blog(document.DocType):
        tags = field.String(multi=True, index='not_analyzed')

    blog = Blog()
    # A multi field defaults to an empty list and supports append.
    assert [] == blog.tags
    blog.tags.append('search')
    blog.tags.append('python')
    assert ['search', 'python'] == blog.tags
def test_docs_with_properties():
    class User(document.DocType):
        pwd_hash = field.String()

        def check_password(self, pwd):
            return md5(pwd).hexdigest() == self.pwd_hash

        @property
        def password(self):
            # Write-only: reading back the raw password is forbidden.
            raise AttributeError('readonly')

        @password.setter
        def password(self, pwd):
            self.pwd_hash = md5(pwd).hexdigest()

    user = User(pwd_hash=md5(b'secret').hexdigest())
    assert user.check_password(b'secret')
    assert not user.check_password(b'not-secret')

    user.password = b'not-secret'
    # The property intercepts assignment, so no 'password' field is stored.
    assert 'password' not in user._d_
    assert not user.check_password(b'secret')
    assert user.check_password(b'not-secret')

    with raises(AttributeError):
        user.password
def test_nested_can_be_assigned_to():
    source = DocWithNested(comments=[{'title': 'First!'}])
    target = DocWithNested()

    target.comments = source.comments
    assert target.comments == [{'title': 'First!'}]
    assert {'comments': [{'title': 'First!'}]} == target.to_dict()


def test_nested_defaults_to_list_and_can_be_updated():
    doc = DocWithNested()
    assert [] == doc.comments

    doc.comments.append({'title': 'hello World!'})
    assert {'comments': [{'title': 'hello World!'}]} == doc.to_dict()


def test_to_dict_is_recursive_and_can_cope_with_multi_values():
    doc = MyDoc(name=['a', 'b', 'c'])
    doc.inner = [{'old_field': 'of1'}, {'old_field': 'of2'}]
    # Assigned dicts are wrapped in the configured doc_class.
    assert isinstance(doc.inner[0], MyInner)
    assert {
        'name': ['a', 'b', 'c'],
        'inner': [{'old_field': 'of1'}, {'old_field': 'of2'}],
    } == doc.to_dict()


def test_to_dict_ignores_empty_collections():
    doc = MyDoc(name='', address={}, count=0, valid=False, tags=[])
    # Empty dict/list values are dropped; falsy scalars are kept.
    assert {'name': '', 'count': 0, 'valid': False} == doc.to_dict()
def test_declarative_mapping_definition():
    assert issubclass(MyDoc, document.DocType)
    assert hasattr(MyDoc, '_doc_type')
    # The doc_type name is derived from the class name.
    assert 'my_doc' == MyDoc._doc_type.name
    expected = {
        'my_doc': {
            'properties': {
                'created_at': {'type': 'date'},
                'name': {'type': 'string'},
                'title': {'index': 'not_analyzed', 'type': 'string'},
                'inner': {
                    'type': 'object',
                    'properties': {'old_field': {'type': 'string'}}
                }
            }
        }
    }
    assert expected == MyDoc._doc_type.mapping.to_dict()


def test_you_can_supply_own_mapping_instance():
    class MyD(document.DocType):
        title = field.String()

        class Meta:
            mapping = Mapping('my_d')
            mapping.meta('_all', enabled=False)

    # Settings applied on the hand-built Mapping survive into the doc type.
    expected = {
        'my_d': {
            '_all': {'enabled': False},
            'properties': {'title': {'type': 'string'}}
        }
    }
    assert expected == MyD._doc_type.mapping.to_dict()
def test_document_can_be_created_dynamicaly():
    now = datetime.now()
    doc = MyDoc(title='hello')
    doc.name = 'My Fancy Document!'
    doc.created_at = now

    inner = doc.inner
    # Repeated access must return the very same inner object.
    assert inner is doc.inner
    inner.old_field = 'Already defined.'
    # Fields never declared in the mapping can still be set dynamically.
    doc.inner.new_field = ['undefined', 'field']

    assert {
        'title': 'hello',
        'name': 'My Fancy Document!',
        'created_at': now,
        'inner': {
            'old_field': 'Already defined.',
            'new_field': ['undefined', 'field']
        }
    } == doc.to_dict()


def test_invalid_date_will_raise_exception():
    doc = MyDoc()
    with raises(ValidationException):
        doc.created_at = 'not-a-date'
def test_document_inheritance():
    assert issubclass(MySubDoc, MyDoc)
    assert issubclass(MySubDoc, document.DocType)
    assert hasattr(MySubDoc, '_doc_type')
    assert 'my_custom_doc' == MySubDoc._doc_type.name
    expected = {
        'my_custom_doc': {
            'properties': {
                'created_at': {'type': 'date'},
                # 'name' is overridden in the subclass to be not_analyzed.
                'name': {'type': 'string', 'index': 'not_analyzed'},
                'title': {'index': 'not_analyzed', 'type': 'string'},
                'inner': {
                    'type': 'object',
                    'properties': {'old_field': {'type': 'string'}}
                }
            }
        }
    }
    assert expected == MySubDoc._doc_type.mapping.to_dict()


def test_meta_fields_are_stored_in_meta_and_ignored_by_to_dict():
    doc = MySubDoc(meta={'id': 42}, name='My First doc!')
    doc.meta.index = 'my-index'
    assert doc.meta.index == 'my-index'
    assert doc.meta.id == 42
    # to_dict() yields only the source; metadata travels separately.
    assert {'name': 'My First doc!'} == doc.to_dict()
    assert {'id': 42, 'index': 'my-index'} == doc.meta.to_dict()
def test_meta_inheritance():
    assert issubclass(MyMultiSubDoc, MySubDoc)
    assert issubclass(MyMultiSubDoc, MyDoc2)
    assert issubclass(MyMultiSubDoc, document.DocType)
    assert hasattr(MyMultiSubDoc, '_doc_type')
    # doc_type should not be inherited
    assert 'my_multi_sub_doc' == MyMultiSubDoc._doc_type.name
    # index and using should be
    assert MyMultiSubDoc._doc_type.index == MySubDoc._doc_type.index
    assert MyMultiSubDoc._doc_type.using == MySubDoc._doc_type.using
    # The mapping merges fields from every parent class.
    expected = {
        'my_multi_sub_doc': {
            'properties': {
                'created_at': {'type': 'date'},
                'name': {'type': 'string', 'index': 'not_analyzed'},
                'title': {'index': 'not_analyzed', 'type': 'string'},
                'inner': {
                    'type': 'object',
                    'properties': {'old_field': {'type': 'string'}}
                },
                'extra': {'type': 'long'}
            }
        }
    }
    assert expected == MyMultiSubDoc._doc_type.mapping.to_dict()


def test_meta_fields_can_be_accessed_directly_with_underscore():
    parent = object()
    doc = MyDoc(_id=42, title='Hello World!')
    doc._parent = parent
    # Underscore attributes alias the corresponding meta fields.
    assert doc.meta.id == 42
    assert doc._id == 42
    assert doc.meta.parent is doc._parent is parent
def test_save_no_index(mock_client):
    # Without an index (from Meta or meta), save must refuse to run.
    doc = MyDoc()
    with raises(ValidationException):
        doc.save(using='mock')


def test_delete_no_index(mock_client):
    # Likewise for delete.
    doc = MyDoc()
    with raises(ValidationException):
        doc.delete(using='mock')


def test_search_with_custom_alias_and_index(mock_client):
    # search() should honour explicit connection alias and index list.
    search_object = MyDoc.search(
        using="staging", index=["custom_index1", "custom_index2"])
    assert search_object._using == "staging"
    assert search_object._index == ["custom_index1", "custom_index2"]
| {
"content_hash": "de3b8dff66307ceae5291be1cfea80c8",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 91,
"avg_line_length": 29.509316770186334,
"alnum_prop": 0.5711429172805725,
"repo_name": "ziky90/elasticsearch-dsl-py",
"id": "e0f979d67ea25626fef9e1a3825d191641e1c82c",
"size": "9502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_elasticsearch_dsl/test_document.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "220040"
}
],
"symlink_target": ""
} |
from sklearn.externals import joblib
class SkMnist(object):
    """Seldon-style wrapper around a pickled scikit-learn MNIST classifier."""

    def __init__(self):
        # Ten digit classes, labelled "class:0" .. "class:9".
        self.class_names = ["class:{}".format(str(i)) for i in range(10)]
        # The model artifact is baked into the image at a fixed path.
        self.clf = joblib.load('/data/sk.pkl')

    def predict(self, X, feature_names):
        """Return class probabilities; feature_names is part of the Seldon
        contract but unused here."""
        return self.clf.predict_proba(X)
| {
"content_hash": "8ad1cc021acd50e3e160324a908b46c3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 73,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.6219512195121951,
"repo_name": "kubeflow/example-seldon",
"id": "8104b974990d8c8ebaeff1c8c3a1ef66183de92b",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/sk_mnist/runtime/SkMnist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2199"
},
{
"name": "Jupyter Notebook",
"bytes": "14326"
},
{
"name": "Makefile",
"bytes": "1154"
},
{
"name": "Python",
"bytes": "11076"
},
{
"name": "R",
"bytes": "3001"
},
{
"name": "Shell",
"bytes": "5433"
}
],
"symlink_target": ""
} |
# Source subreddit and destination Telegram channel for this mirror.
subreddit = 'apexlegends'
t_channel = '@r_apexlegends'


def send_post(submission, r2t):
    """Forward the reddit submission through the bot's simple sender."""
    return r2t.send_simple(submission)
| {
"content_hash": "7973866b9f69cc52496f1f6b7c66f90e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.7265625,
"repo_name": "Fillll/reddit2telegram",
"id": "010369d77636b6a7df7b4bfcc133070435480d1d",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit2telegram/channels/~inactive/r_apexlegends/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "301463"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |
import os.path

# Append a marker line to ds.log located next to this script.
dir_path = os.path.dirname(os.path.realpath(__file__))
log_path = os.path.join(dir_path, 'ds.log')
with open(log_path, 'a') as fp:
    fp.write('Hello World')
| {
"content_hash": "74469e1ef1bb17ccf166bbee3b889c17",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 55,
"avg_line_length": 26,
"alnum_prop": 0.6538461538461539,
"repo_name": "DonJayamanne/pythonVSCode",
"id": "ace41e3f5c44cd3781dfe7d7210e6b555094ebe3",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/test/pythonFiles/datascience/simple_note_book.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "569"
},
{
"name": "JavaScript",
"bytes": "28707"
},
{
"name": "Jupyter Notebook",
"bytes": "10520"
},
{
"name": "Python",
"bytes": "2602995"
},
{
"name": "Roff",
"bytes": "108"
},
{
"name": "Shell",
"bytes": "76"
},
{
"name": "TypeScript",
"bytes": "5178987"
}
],
"symlink_target": ""
} |
from gui.widgets.frames import Frame
from gui.widgets import Buttons
import constants as c
class PlusMinusFrame(Frame.Frame):
    """A two-button frame exposing decrement (-) and increment (+) actions."""

    def __init__(self, parent, row, column, increase, decrease, **kwargs):
        """increase/decrease are the callbacks wired to the +/- buttons."""
        Frame.Frame.__init__(
            self, parent, c.PLUS_MINUS_FRAME, row, column, **kwargs)
        minus_button = Buttons.Button(
            self.widget, c.MINUS, 0, 0, command=decrease, padx=0)
        plus_button = Buttons.Button(
            self.widget, c.PLUS, 0, 1, command=increase, padx=0)
        self.addChildWidgets((minus_button, plus_button))
| {
"content_hash": "d6839d22bcb50a2d1d7f6af5d9f5f18c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 41.166666666666664,
"alnum_prop": 0.6619433198380567,
"repo_name": "kahvel/VEP-BCI",
"id": "a450ad3455fc048785d73d88aad8f7f3cab62d4e",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gui/widgets/frames/PlusMinusFrame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "214297"
}
],
"symlink_target": ""
} |
# Close all open matplotlib figure windows before the next example runs.
plt.close('all')
"content_hash": "846ad4a1ef22966c8b3ae30e2d616266",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 16,
"avg_line_length": 16,
"alnum_prop": 0.6875,
"repo_name": "leesavide/pythonista-docs",
"id": "2c1b607070d3ff4b8c06e2fc89f7309d5680f038",
"size": "56",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Documentation/matplotlib/users/recipes-1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "84392"
},
{
"name": "HTML",
"bytes": "70040156"
},
{
"name": "JavaScript",
"bytes": "89777"
},
{
"name": "Python",
"bytes": "884325"
}
],
"symlink_target": ""
} |
from JumpScale import j
# Resolve the dynamically generated cuisine base class once at import time.
base = j.tools.cuisine._getBaseClassLoader()


class systemservices(base):
    """Cuisine module grouping system-service helpers (no extras defined)."""

    def __init__(self, executor, cuisine):
        base.__init__(self, executor, cuisine)
| {
"content_hash": "c4e3d047ed7596376a1fd2c7ae4dec6a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 46,
"avg_line_length": 21.22222222222222,
"alnum_prop": 0.6858638743455497,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "68ef182b458ce4e6e91d8d4ed8dceb579403b834",
"size": "191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/tools/cuisine/systemservices/systemservices.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.