text stringlengths 4 1.02M | meta dict |
|---|---|
"""Experimenter base class for problem statement and evaluation.
Experimenters represent black-box optimization problems and/or users.
Each experimenter defines a ProblemStatement, representing the search space and
the metrics it returns in Evaluate (via CompletedTrials).
Pseudo-code for using Experimenters with Vizier Designers:
exp = ExperimenterSubClass(...) # Possibly configure the experimenter.
problem_statement = exp.problem_statement()
designer = Designer(problem_statement) # Configure the search algorithm
for i in range(10):
suggestions = designer.suggest(count=2)
exp.evaluate(suggestions) # Evaluate in-place, for maximum flexibility.
designer.update(suggestions)
"""
import abc
from typing import Sequence
from vizier import pyvizier
class Experimenter(metaclass=abc.ABCMeta):
  """Abstract base class for Experimenters."""

  @abc.abstractmethod
  def evaluate(self, suggestions: Sequence[pyvizier.Trial]):
    """Evaluates and mutates the Trials in-place.

    NOTE: The Experimenter is expected to mutate and/or complete the Trials as
    they wish, as to simulate users to maximum flexibility.

    Args:
      suggestions: Sequence of Trials to be evaluated.
    """

  @abc.abstractmethod
  def problem_statement(self) -> pyvizier.ProblemStatement:
    """The search configuration generated by this experimenter."""
| {
"content_hash": "fc6383bbc3c0d67d5c26586dc63a2583",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.7667638483965015,
"repo_name": "google/vizier",
"id": "d75ffcec73751da877e353bb2c7d42d5c6a42406",
"size": "1948",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "vizier/_src/benchmarks/experimenters/experimenter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1282546"
},
{
"name": "Shell",
"bytes": "2440"
}
],
"symlink_target": ""
} |
"""Mocking utilities for testing"""
import json
import os
import re
import uuid
from io import BytesIO
from unittest.mock import Mock
from urllib.parse import parse_qs, urlparse
import pytest
from tornado import web
from tornado.httpclient import HTTPResponse
from tornado.httputil import HTTPServerRequest
from tornado.log import app_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient
RegExpType = type(re.compile('.'))
class MockAsyncHTTPClient(SimpleAsyncHTTPClient):
    """A mock AsyncHTTPClient that allows registering handlers for mocked requests

    Call .add_host to mock requests made to a given host.
    """

    def initialize(self, *args, **kwargs):
        super().initialize(*args, **kwargs)
        # host name -> list of (path_spec, handler) pairs, see add_host()
        self.hosts = {}

    def add_host(self, host, paths):
        """Add a host whose requests should be mocked.

        Args:
            host (str): the host to mock (e.g. 'api.github.com')
            paths (list(str|regex, callable)): a list of paths (or regexps for paths)
                and callables to be called for those paths.

        The mock handlers will receive the request as their only argument.

        Mock handlers can return:

        - None
        - int (empty response with this status code)
        - str, bytes for raw response content (status=200)
        - list, dict for JSON response (status=200)
        - HTTPResponse (passed unmodified)

        Example::

            client.add_host('api.github.com', [
                ('/user', lambda request: {'login': 'name'}),
            ])
        """
        self.hosts[host] = paths

    def fetch_impl(self, request, response_callback):
        """Dispatch a fetch to the registered mock handler for the request's host."""
        urlinfo = urlparse(request.url)
        host = urlinfo.hostname
        if host not in self.hosts:
            # Unregistered host: warn and perform the real HTTP request.
            app_log.warning("Not mocking request to %s", request.url)
            return super().fetch_impl(request, response_callback)
        paths = self.hosts[host]
        response = None
        for path_spec, handler in paths:
            if isinstance(path_spec, str):
                # plain string: exact match on the URL path
                if path_spec == urlinfo.path:
                    response = handler(request)
                    break
            else:
                # otherwise treated as a compiled regex matched against the path
                if path_spec.match(urlinfo.path):
                    response = handler(request)
                    break
        # Normalize the handler's return value into an HTTPResponse
        # (accepted types documented in add_host).
        if response is None:
            # no handler matched, or the handler returned None: 404
            response = HTTPResponse(request=request, code=404, reason=request.url)
        elif isinstance(response, int):
            # bare status code, empty body
            response = HTTPResponse(request=request, code=response)
        elif isinstance(response, bytes):
            response = HTTPResponse(
                request=request,
                code=200,
                buffer=BytesIO(response),
            )
        elif isinstance(response, str):
            response = HTTPResponse(
                request=request,
                code=200,
                buffer=BytesIO(response.encode('utf8')),
            )
        elif isinstance(response, (dict, list)):
            # containers are JSON-serialized and tagged as application/json
            response = HTTPResponse(
                request=request,
                code=200,
                buffer=BytesIO(json.dumps(response).encode('utf8')),
                headers={'Content-Type': 'application/json'},
            )
        response_callback(response)
def setup_oauth_mock(
    client,
    host,
    access_token_path,
    user_path=None,
    token_type='Bearer',
    token_request_style='post',
):
    """setup the mock client for OAuth

    generates and registers two handlers common to OAuthenticators:

    - create the access token (POST access_token_path)
    - get the user info (GET user_path)

    and adds a method for creating a new mock handler to pass to .authenticate():

        client.handler_for_user(user)

    where user is the user-model to be returned by the user request.

    Args:
        client (MockAsyncHTTPClient): the mock client to register handlers on
        host (str or list(str)): the host(s) to mock (e.g. api.github.com)
        access_token_path (str): The path for the access token request (e.g. /access_token)
        user_path (str): The path for requesting (e.g. /user);
            required unless token_request_style is 'jwt'
        token_type (str): the token_type field for the provider
        token_request_style (str): how the code is transmitted:
            'post' (query/form encoded), 'json' (JSON body),
            or 'jwt' (form encoded; token reply also carries id_token)
    """
    if user_path is None and token_request_style != "jwt":
        raise TypeError("user_path is required unless token_request_style is jwt")
    # shared state exposed on the client: code -> user model, token -> user model
    client.oauth_codes = oauth_codes = {}
    client.access_tokens = access_tokens = {}

    def access_token(request):
        """Handler for access token endpoint

        Checks code and allocates a new token.
        Replies with JSON model for the token.
        """
        assert request.method == 'POST', request.method
        if token_request_style == 'json':
            body = request.body.decode('utf8')
            try:
                body = json.loads(body)
            except ValueError:
                return HTTPResponse(
                    request=request,
                    code=400,
                    reason="Body not JSON: %r" % body,
                )
            else:
                code = body['code']
        else:
            # 'post'/'jwt' styles: the code arrives in the query string
            # or, failing that, in the form-encoded body
            query = urlparse(request.url).query
            if not query:
                query = request.body.decode('utf8')
            query = parse_qs(query)
            if 'code' not in query:
                return HTTPResponse(
                    request=request,
                    code=400,
                    reason="No code in access token request: url=%s, body=%s"
                    % (
                        request.url,
                        request.body,
                    ),
                )
            code = query['code'][0]
        if code not in oauth_codes:
            return HTTPResponse(
                request=request,
                code=403,
                reason="No such code: %s" % code,
            )
        # consume code, allocate token
        token = uuid.uuid4().hex
        user = oauth_codes.pop(code)
        access_tokens[token] = user
        model = {
            'access_token': token,
            'token_type': token_type,
        }
        if token_request_style == 'jwt':
            # assumes the user model carries its own 'id_token' entry
            model['id_token'] = user['id_token']
        return model

    def get_user(request):
        """Handler for the user-info endpoint: resolve the bearer token to a user."""
        assert request.method == 'GET', request.method
        auth_header = request.headers.get('Authorization')
        if auth_header:
            # "<type> <token>" -> token
            token = auth_header.split(None, 1)[1]
        else:
            # fall back to ?access_token=... in the URL
            query = parse_qs(urlparse(request.url).query)
            if 'access_token' in query:
                token = query['access_token'][0]
            else:
                return HTTPResponse(
                    request=request,
                    code=403,
                    reason='Missing Authorization header',
                )
        if token not in access_tokens:
            return HTTPResponse(
                request=request,
                code=403,
                reason='No such access token: %r' % token,
            )
        return access_tokens.get(token)

    if isinstance(host, str):
        hosts = [host]
    else:
        hosts = host
    for host in hosts:
        # NOTE(review): when user_path is None (jwt style) this still registers
        # (None, get_user); a request reaching it would fail on None.match —
        # presumably that path is never exercised in jwt mode; confirm.
        client.add_host(
            host,
            [
                (access_token_path, access_token),
                (user_path, get_user),
            ],
        )

    def handler_for_user(user):
        """Return a new mock RequestHandler

        user should be the JSONable model that will ultimately be returned
        from the get_user request.
        """
        code = uuid.uuid4().hex
        oauth_codes[code] = user
        handler = Mock(spec=web.RequestHandler)
        handler.find_user = Mock(return_value=None)
        handler.get_argument = Mock(return_value=code)
        handler.request = HTTPServerRequest(
            method='GET', uri='https://hub.example.com?code=%s' % code
        )
        handler.hub = Mock(server=Mock(base_url='/hub/'), base_url='/hub/')
        return handler

    client.handler_for_user = handler_for_user
def mock_handler(Handler, uri='https://hub.example.com', method='GET', **settings):
    """Instantiate a Handler wired into a throwaway mock Application.

    Extra keyword arguments become Application settings.
    """
    mock_hub = Mock(base_url='/hub/', server=Mock(base_url='/hub/'))
    app = web.Application(
        hub=mock_hub,
        cookie_secret=os.urandom(32),
        db=Mock(rollback=Mock(return_value=None)),
        **settings
    )
    req = HTTPServerRequest(
        method=method,
        uri=uri,
        connection=Mock(),
    )
    instance = Handler(application=app, request=req)
    # RequestHandler internals expect _transforms to be set before finish()
    instance._transforms = []
    return instance
async def no_code_test(authenticator):
    """Exercise authenticate() when the request carries no oauth code."""
    request_handler = Mock(spec=web.RequestHandler)
    request_handler.get_argument = Mock(return_value=None)
    with pytest.raises(web.HTTPError) as excinfo:
        await authenticator.authenticate(request_handler)
    assert excinfo.value.status_code == 400
| {
"content_hash": "1c1c6ec91190c9593c421a7a26c05446",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 91,
"avg_line_length": 32.13405797101449,
"alnum_prop": 0.5569962791746533,
"repo_name": "jupyterhub/oauthenticator",
"id": "c272329905193f6c05a46de7739ef74342779c01",
"size": "8869",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "oauthenticator/tests/mocks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "203781"
}
],
"symlink_target": ""
} |
class ScrapeShawBrosPipeline(object):
    """Scrapy item pipeline that passes every item through unchanged."""

    def process_item(self, item, spider):
        # No transformation needed — hand the item to downstream pipelines.
        return item
| {
"content_hash": "27e6d30480b7f9b3dc81f9b5e5db7b67",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.71,
"repo_name": "vlandham/shaw_bros",
"id": "b559415567e302c18b276fa33227b2e0569ad476",
"size": "294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrape/scrape_shaw_bros/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7564"
},
{
"name": "HTML",
"bytes": "6757122"
},
{
"name": "JavaScript",
"bytes": "26157"
},
{
"name": "Python",
"bytes": "7186"
},
{
"name": "R",
"bytes": "2581"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
import urllib.request
import urllib.parse
import io
from lxml import etree
MANIADB_ROOT_URL = 'http://www.maniadb.com/api/'
MANIADB_SEARCH_URL = 'search/' #검색 asp 파일 정보. example: http://www.maniadb.com/api/search/metallica/?sr=artist&display=10&key=example&v=0.5
MANIADB_ALBUM_URL = 'album/' # example: http://www.maniadb.com/api/album/712017/?key=example&v=0.5
class pymaniadb:
    """Thin client for the maniadb.com XML web API.

    Exposes keyword search (searchDB) and album lookup (getAlbumInfoFromID);
    both return a list of dicts, one per <item> element in the XML reply.
    """

    def __init__(self, apiKey, debug=False):
        # EloManager main class init method
        if debug:
            print("paymaniadb Init...")
        self.debug = debug
        self.apiKey = apiKey

    @staticmethod
    def _parse_items(xml_bytes):
        """Parse a maniadb XML payload into a list of per-<item> dicts.

        Each dict carries the item's first attribute value under 'id' plus
        one entry per child element (tag -> text).
        """
        results = []
        # NOTE(review): keyed on the "start" event to preserve the original
        # behavior; child elements are only guaranteed complete at "end" —
        # confirm and switch the event if children come back empty.
        for action, elem in etree.iterparse(io.BytesIO(xml_bytes),
                                            events=("start", "end")):
            # original used `action in ("start")` — a substring test that only
            # worked by accident; compare explicitly instead
            if action == "start" and elem.tag == "item":
                item = {'id': elem.values()[0]}
                for child in elem:  # iteration replaces deprecated getchildren()
                    item[child.tag] = child.text
                results.append(item)
        return results

    def _fetch_items(self, request_url):
        """GET request_url and parse the XML reply; returns [] on network failure."""
        print(request_url)
        try:
            # context manager closes the HTTP response (previously leaked)
            with urllib.request.urlopen(request_url) as response:
                payload = response.read()
        except IOError:
            print("URL address or maniadb Error")
            return []
        return self._parse_items(payload)

    def searchDB(self, queryStr, itemtypeStr, targetStr="artist", displayNum=10):
        """Search maniadb for queryStr.

        Args:
            queryStr: search keyword (becomes a URL path component).
            itemtypeStr: item type to search, e.g. "album".
            targetStr: search target field (default "artist").
            displayNum: maximum number of results to request.

        Returns:
            list of result dicts (possibly empty).
        """
        SEARCH_PARAM = urllib.parse.urlencode(
            {'key': self.apiKey, 'target': targetStr, 'itemtype': itemtypeStr,
             'display': displayNum, 'v': '0.5'},
            encoding='utf-8')
        requestFullUrl = MANIADB_ROOT_URL + MANIADB_SEARCH_URL + queryStr + '/?' + SEARCH_PARAM
        return self._fetch_items(requestFullUrl)

    def getAlbumInfoFromID(self, idStr):
        """Fetch album details for a maniadb album id string.

        Returns:
            list of result dicts (possibly empty).
        """
        SEARCH_PARAM = urllib.parse.urlencode({'v': '0.5'}, encoding='utf-8')
        requestFullUrl = MANIADB_ROOT_URL + MANIADB_ALBUM_URL + idStr + '/?' + SEARCH_PARAM
        return self._fetch_items(requestFullUrl)
if __name__ == "__main__":
    # Smoke test against the live maniadb API — requires network access and a
    # real API key in place of "example".
    print('maniadb ver 0.3')
    pymania = pymaniadb(apiKey="example")
    print('searching keyword muse... (item type : album)')
    dbSearchArray = pymania.searchDB(queryStr="muse", itemtypeStr="album")
    print(dbSearchArray)
    print('search album id 712017...')
    albumSearchArray = pymania.getAlbumInfoFromID('712017')
print(albumSearchArray) | {
"content_hash": "a0fcda0a85038373938dca3a7e2f8be3",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 139,
"avg_line_length": 33.75,
"alnum_prop": 0.7066666666666667,
"repo_name": "Pusungwi/pymaniadb",
"id": "7832d3a0d8db6010f12c9abf096ff8ce41a8b7d4",
"size": "2840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/lib/pymaniadb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6299"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
import argparse
from arnold import main
from flask.ext.script import Manager, Shell, Server
from contracts_api.app import create_app
from contracts_api.settings import DevConfig, ProdConfig
from contracts_api.database import db
# Pick the Flask config from the CONTRACTS_API_ENV environment variable;
# anything other than 'prod' runs with the development config.
if os.environ.get("CONTRACTS_API_ENV") == 'prod':
    app = create_app(ProdConfig)
else:
    app = create_app(DevConfig)

HERE = os.path.abspath(os.path.dirname(__file__))  # directory of this file
TEST_PATH = os.path.join(HERE, 'tests')  # test-suite location
manager = Manager(app)  # Flask-Script command-line manager
def _make_context():
    """Return context dict for a shell session so you can access
    app, db, and the User model by default.
    """
    # NOTE(review): `User` is never imported in this module — referencing it
    # will raise NameError when a shell session starts. Import the model
    # (or drop the entry) to fix.
    return {'app': app, 'db': db, 'User': User}
@manager.command
def migrate_up(fake=False):
    """Apply pending arnold migrations (fake=True records without executing)."""
    migration_kwargs = {
        'direction': 'up',
        'database': db,
        'directory': os.path.join(HERE, 'migrations'),
        'migration_module': "migrations",
        'fake': fake,
    }
    main(**migration_kwargs)
    return False
@manager.command
def migrate_down(fake=False):
    """Roll back arnold migrations (fake=True records without executing)."""
    migration_kwargs = {
        'direction': 'down',
        'database': db,
        'directory': os.path.join(HERE, 'migrations'),
        'migration_module': "migrations",
        'fake': fake,
    }
    main(**migration_kwargs)
    return
# Register CLI commands: `python manage.py server` and `python manage.py shell`.
manager.add_command('server', Server(port=os.environ.get('PORT', 9000)))
manager.add_command('shell', Shell(make_context=_make_context))

if __name__ == '__main__':
    manager.run()
| {
"content_hash": "efc904159e64bba3a49ffe76f7c6385c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 72,
"avg_line_length": 24.381818181818183,
"alnum_prop": 0.6644295302013423,
"repo_name": "codeforamerica/contracts-api",
"id": "d597df0e72c8b1b99bd640be127fc6fd6d8db8dd",
"size": "1387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31944"
}
],
"symlink_target": ""
} |
import line_profiler
from timeit import default_timer as timer
import sys
import glob
import os
import re
import fnmatch
import math
import numpy as np
from vocabulary import Vocabulary
import collections
from scipy import sparse
from scipy.sparse import * # dok_matrix
from swig.array_of_trees import ArrayOfTrees
def _my_get_index_dtype(*a, **kw):
return np.int64
# Force scipy.sparse to use 64-bit indices everywhere so large co-occurrence
# matrices don't overflow the default index dtype.
# NOTE(review): this patches private scipy internals and is fragile across
# scipy versions — confirm against the pinned scipy release.
sparse.sputils.get_index_dtype = _my_get_index_dtype
sparse.compressed.get_index_dtype = _my_get_index_dtype
sparse.csr.get_index_dtype = _my_get_index_dtype
sparse.csr_matrix.get_index_dtype = _my_get_index_dtype
sparse.bsr.get_index_dtype = _my_get_index_dtype
# ---- CLI arguments: corpus input dir and output dir (holding the vocab) ----
argv = sys.argv
if len(argv) < 3:
    print("usage: input_dir output_dir")
    exit()
name_dir_in = argv[1]
name_dir_out = argv[2]
# the vocabulary is read from files precomputed into the output dir
vocab = Vocabulary()
vocab.read_from_precomputed(name_dir_out)
size_window = 2  # co-occurrence window size, in tokens
# sliding window of the last `size_window` word ids; -1 is a sentinel
d = collections.deque(maxlen=size_window)
for i in range(size_window):
    d.append(-1)
#matrix=dok_matrix((vocab.cnt_words, vocab.cnt_words), dtype=np.int64)
#matrix=lil_matrix((vocab.cnt_words, vocab.cnt_words), dtype=np.int64)
#matrix=dok_matrix((vocab.cnt_words, vocab.cnt_words), dtype=np.int64)
cnt_workers = 2
# counts are accumulated in the project's SWIG extension (swig/array_of_trees)
m = ArrayOfTrees(vocab.cnt_words)
def get_start_i(N, cnt_workers, id_worker):
    """Return the first index of worker `id_worker`'s slice of range(N).

    With fewer items than workers, trailing workers get empty slices.
    """
    if N < cnt_workers:
        return min(N, id_worker)
    chunk = (N + 1) // cnt_workers
    remainder = N % cnt_workers
    # the first `remainder` workers are shifted one extra slot each
    offset = id_worker if id_worker < remainder else remainder
    return chunk * id_worker + offset
def get_interval(N, cnt_workers, id_worker):
    """Return the half-open (start, end) slice of range(N) owned by this worker."""
    start = get_start_i(N, cnt_workers, id_worker)
    end = get_start_i(N, cnt_workers, id_worker + 1)
    return (start, end)
def get_worker_id(N, cnt_workers, v):
    """Return which worker's slice of range(N) contains index v."""
    if N < cnt_workers:
        return v
    chunk = (N + 1) // cnt_workers
    remainder = N % cnt_workers
    # the first `remainder` workers own chunk+1 items each
    boundary = remainder * (chunk + 1)
    if v < boundary:
        return v // (chunk + 1)
    return (v - boundary) // chunk + remainder
def accumulate(id1, id2):
    """Record one co-occurrence of the word-id pair (id1, id2)."""
    # TODO: decide which worker accumulates once this is parallelised
    # (dense/sparse alternative kept for reference: matrix[id1, id2] += 1)
    m.accumulate(id1, id2)
#@profile
def process_word(word):
    """Push one token through the sliding co-occurrence window.

    Sentence-ending punctuation resets the window (fills it with -1 sentinels)
    so that counts never cross sentence boundaries; out-of-vocabulary tokens
    (vocab id < 0) are skipped. Otherwise the id joins the window deque `d`
    and symmetric counts are accumulated against the other window entries.
    """
    id_word = vocab.get_id(word)
    if word in {".", "!", "?", "…"}:
        if True: # options.obey_sentence_bounds
            id_word = -1
            # flush the window with sentinels
            for i in range(size_window):
                d.append(-1)
    else:
        if id_word < 0:
            # out-of-vocabulary token: ignore entirely
            return
    #print("word : '{}'".format(word))
    d.append(id_word)
    for i in range(size_window - 1):
        # skip any pair involving a sentinel
        if d[-1] == -1 or d[i] == -1:
            continue
        # print("accing",d[-1],d[i])
        # print("accing",d[i],d[-1])
        # count the pair in both orders (symmetric matrix)
        accumulate(d[-1], d[i])
        accumulate(d[i], d[-1])
def process_file(name):
    """Tokenize one corpus file and feed every token through process_word.

    Lines are lowercased; tokens are word-like runs (including hyphens and
    apostrophes) or sentence punctuation, matched by `re_pattern`.
    """
    print("processing " + name)
    # hoisted out of the per-line loop (was recompiled/looked up every line)
    re_pattern = r"[\w\-']+|[.,!?…]"
    # `with` guarantees the handle is closed (previously leaked)
    with open(name, errors="replace") as f:
        for line in f:
            s = line.strip().lower()
            tokens = re.findall(re_pattern, s)
            for token in tokens:
                process_word(token)
# ---- corpus pass: walk every file under the input dir and count pairs ----
start = timer()
for root, dir, files in os.walk(name_dir_in, followlinks=True):
    for items in fnmatch.filter(files, "*"):
        process_file(os.path.join(root, items))
end = timer()
print("done reading corpus, took", end - start)
start = end
print("-----dumping data------")
m.dump_csr(name_dir_out, vocab.l_frequencies)
exit()
# NOTE(review): everything below is unreachable (exit() above) and refers to
# `matrix`, which is only defined in commented-out code — legacy scipy path
# kept for reference only.
print("-----converting to COO------")
matrix_coo = matrix.tocoo()
end = timer()
print(" took", end - start)
start = end
# matrix_coo.sort_indices()
# print(matrix)
print("-----converting to csr------")
matrix_csr = matrix_coo.tocsr()
end = timer()
print(" took", end - start)
start = end
print("-----converting back to coo------")
matrix_coo = matrix_csr.tocoo()
end = timer()
print(" took", end - start)
start = end
# print(matrix_coo)
cnt_words_processed = vocab.l_frequencies.sum()
# print(matrix_csr)
debug = False
if debug:
    # dump human-readable bigram scores for inspection
    f_out = open("bigrams_list", "w")
    for i in zip(matrix_coo.row, matrix_coo.col):
        row = i[0]
        col = i[1]
        freq = matrix[i]
        # NOTE(review): l_frequencies[col] appears twice; a PMI-style score
        # would normally use the row and col frequencies — confirm before
        # reviving this code.
        v = math.log2((freq * cnt_words_processed) /
                      (vocab.l_frequencies[col] * vocab.l_frequencies[col]))
        f_out.write("{}\t{}\t{}\t{:0.5f}\n".format(
            vocab.get_word_by_id(row), vocab.get_word_by_id(col), freq, v))
    f_out.close()
# print(matrix_csr.indices.dtype)
# compute one score per stored nonzero, in CSR order
data_pmi = np.zeros(matrix_csr.data.shape[0], dtype=np.float32)
ind = 0
for i in zip(matrix_coo.row, matrix_coo.col):
    row = i[0]
    col = i[1]
    freq = matrix[i]
    v = math.log2((freq * cnt_words_processed) /
                  (vocab.l_frequencies[col] * vocab.l_frequencies[col]))
    data_pmi[ind] = v
    ind += 1
# f_out=open("bigrams.data.bin","wb")
# f_out.close()
# print(matrix_csr.indices.dtype)
# write the CSR triplet (indices, indptr, data) as raw binary files
matrix_csr.indices.tofile(os.path.join(name_dir_out, "bigrams.col_ind.bin"))
matrix_csr.indptr.tofile(os.path.join(name_dir_out, "bigrams.row_ptr.bin"))
data_pmi.tofile(os.path.join(name_dir_out, "bigrams.data.bin"))
| {
"content_hash": "b4307288ed1188aa48955dd84718e17b",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 95,
"avg_line_length": 27.273224043715846,
"alnum_prop": 0.6183129633340012,
"repo_name": "undertherain/nlp_cooc",
"id": "bc1683cc694d62cdb9ed9b67eefcb257d3c29e6d",
"size": "5018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/get_cooccurrence.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "109121"
},
{
"name": "C++",
"bytes": "574635"
},
{
"name": "Makefile",
"bytes": "4863"
},
{
"name": "Python",
"bytes": "15593"
}
],
"symlink_target": ""
} |
import bz2
import io
import os

data = 'Contents of the example file go here.\n'

# Wrap the binary bz2 stream in a text encoder so str can be written directly.
with bz2.BZ2File('example.bz2', 'wb') as compressed, \
        io.TextIOWrapper(compressed, encoding='utf-8') as writer:
    writer.write(data)

# Report the resulting file type via the external `file` utility.
os.system('file example.bz2')
| {
"content_hash": "e44a0d44d365086a8a4967d39c35ef78",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 59,
"avg_line_length": 22.363636363636363,
"alnum_prop": 0.6910569105691057,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "9e9c3e2544eb211ac0cf75e89a1fbe9051db60c8",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_data_compression_and_archiving/bz2_file_write.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
} |
# About-dialog metadata shown by on_aboutMenuItem_activate.
version = "0.1"
authors = ("Peter Stroia-Williams", '')
copyright = "© 2010"
comments = "A somewhat messily coded but functional match viewer."
import matchviewcanvas
import pygtk
pygtk.require("2.0")
import gtk
import os
import sys
class MatchViewerApp(object):
    """GTK (PyGTK/Python 2) application for viewing point matches between two images.

    Wires a Glade-built UI to two matchviewcanvas canvases and manages
    loading/saving simple four-line text project files.
    """

    # filename of the currently loaded match list (None until one is loaded)
    matchListFN = None

    def on_window_destroy(self, widget) :
        # Quit the GTK main loop when the main window is closed.
        gtk.main_quit()
        return True

    def saveProject(self) :
        """Write the current project to self.projFN.

        Format: image1 path, image2 path, match-list path (all relative to the
        project file's directory), then the coordinate-format flag line.
        """
        projFile = open(self.projFN, "w")
        projDir = os.path.dirname(self.projFN)
        projFile.write(os.path.relpath(self.im1widgetHandler.imageFilename,projDir) + '\n')
        projFile.write(os.path.relpath(self.im2widgetHandler.imageFilename,projDir) + '\n')
        projFile.write(os.path.relpath(self.matchListFN,projDir) + '\n')
        if self.rowColRadioButton.get_active() :
            projFile.write("row col format\n")
        else :
            projFile.write("xy format\n")
        projFile.close()

    def openProject(self) :
        """Load a project file from self.projFN (see saveProject for the format)."""
        projFile = open(self.projFN, "r")
        projDir = os.path.dirname(self.projFN)
        line1 = projFile.readline()   # image 1 path (relative)
        line2 = projFile.readline()   # image 2 path (relative)
        line3 = projFile.readline()   # match-list path (relative)
        line4 = projFile.readline()   # coordinate-format flag
        projFile.close()
        self.im1widgetHandler.set_image_file(os.path.normpath(projDir + '/' + line1.strip()))
        self.im2widgetHandler.set_image_file(os.path.normpath(projDir + '/' + line2.strip()))
        matchListFN = os.path.normpath(projDir + '/' + line3.strip())
        if line4 == "row col format\n" :
            self.rowColRadioButton.set_active(True)
            self.xyRadioButton.set_active(False)
        else :
            self.rowColRadioButton.set_active(False)
            self.xyRadioButton.set_active(True)
        self.loadMatchList(matchListFN)
        # reflect the loaded files in the chooser buttons
        self.builder.get_object("im1chooserbutton").set_filename(self.im1widgetHandler.imageFilename)
        self.builder.get_object("im2chooserbutton").set_filename(self.im2widgetHandler.imageFilename)
        self.builder.get_object("matchlistchooserbutton4").set_filename(matchListFN)

    def on_swapImages_menuitem_activate(self, widget) :
        # Swap which image is shown in which canvas.
        tempImFN = self.im1widgetHandler.imageFilename
        self.im1widgetHandler.set_image_file(self.im2widgetHandler.imageFilename)
        # NOTE(review): only the attribute is assigned here (no set_image_file
        # call) — asymmetric with the line above; confirm this is intentional.
        self.im2widgetHandler.imageFilename = tempImFN
        self.builder.get_object("im1chooserbutton").set_filename(self.im1widgetHandler.imageFilename)
        self.builder.get_object("im2chooserbutton").set_filename(self.im2widgetHandler.imageFilename)
        self.redrawManager.force_redraw_all()

    def on_new_menuitem_activate(self, widget) :
        # "New" re-shows the project-setup window.
        self.openWindow.show()

    def on_open_menuitem_activate(self, widget) :
        """Show an Open dialog and load the chosen project file."""
        dialog = gtk.FileChooserDialog("Open Project", None, gtk.FILE_CHOOSER_ACTION_OPEN,
                                       (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                        gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        dialog.set_default_response(gtk.RESPONSE_OK)
        response = dialog.run()
        if response == gtk.RESPONSE_OK:
            self.projFN = dialog.get_filename()
            print dialog.get_filename(), 'selected'
            self.openProject()
        elif response == gtk.RESPONSE_CANCEL:
            print 'Closed, no files selected'
        dialog.hide()

    def on_saveAsMenuItem_activate(self, widget) :
        """Show a Save As dialog and write the project to the chosen file."""
        dialog = gtk.FileChooserDialog("Save Project As", None, gtk.FILE_CHOOSER_ACTION_SAVE,
                                       (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                        gtk.STOCK_SAVE, gtk.RESPONSE_OK))
        dialog.set_default_response(gtk.RESPONSE_OK)
        dialog.set_do_overwrite_confirmation(True)
        response = dialog.run()
        if response == gtk.RESPONSE_OK:
            self.projFN = dialog.get_filename()
            self.saveProject()
        elif response == gtk.RESPONSE_CANCEL:
            print 'Closed, no files selected'
        dialog.hide()

    def on_saveMenuItem_activate(self, widget) :
        # Save, prompting for a filename the first time.
        if self.projFN == None :
            self.on_saveAsMenuItem_activate(widget)
        else :
            self.saveProject()

    def on_openWindow_delete_event(self, widget, event = None) :
        # Hide rather than destroy so the window can be reused.
        self.openWindow.hide()
        return True

    def on_saveAsDialog_delete_event(self, widget, event = None) :
        self.saveAsDialog.hide()
        return True

    def loadMatchList(self, filename) :
        """Load a match-list file and display its points on both canvases."""
        self.matchListFN = filename
        self.pointList1 = []
        # NOTE(review): matchListFile is read into matchList but never used,
        # and the handle is never closed — dead code candidate.
        matchListFile = open(self.matchListFN)
        matchList = []
        for line in matchListFile :
            matchList.append(line)
        pointList1, pointList2 = matchviewcanvas.loadMatchList(filename)
        self.im1widgetHandler.set_point_list(pointList1)
        self.im2widgetHandler.set_point_list(pointList2)
        self.redrawManager.force_redraw_all()

    def on_matchlistchooserbutton4_file_set(self, widget) :
        self.loadMatchList(widget.get_filename())

    def on_rowCol_radiobutton_toggled(self, widget, data=None) :
        # Re-parse the match list when the coordinate format changes.
        if(self.matchListFN):
            self.loadMatchList(self.matchListFN)

    def on_numbermatchesmenuitem_toggled(self, widget) :
        # Toggle numeric labels on the match points of both canvases.
        self.im1widgetHandler.showNumbers = self.im2widgetHandler.showNumbers = widget.get_active()
        self.redrawManager.force_redraw_all()

    def on_aboutMenuItem_activate(self, widget) :
        # About dialog populated from the module-level metadata constants.
        dialog = gtk.AboutDialog()
        dialog.set_name("MatchViewer")
        dialog.set_version(version)
        dialog.set_authors(authors)
        dialog.set_copyright(copyright)
        dialog.set_comments(comments)
        dialog.run()
        dialog.destroy()

    def getGladeFilename(self):
        # The .glade UI definition lives next to this module.
        dir = os.path.dirname(__file__)
        print dir
        return os.path.join(dir, "MatchViewer.glade")

    def __init__(self):
        """Build the UI from Glade, wire signal handlers, and optionally open a
        project file passed as the single command-line argument."""
        self.builder = gtk.Builder()
        self.builder.add_from_file(self.getGladeFilename())
        drawArea1 = self.builder.get_object("im1DrawingArea")
        drawArea2 = self.builder.get_object("im2DrawingArea")
        self.redrawManager = matchviewcanvas.RedrawManager([drawArea1, drawArea2])
        self.im1widgetHandler = matchviewcanvas.CairoImageWidgetEventHandler(drawArea1, self.redrawManager)
        self.im2widgetHandler = matchviewcanvas.CairoImageWidgetEventHandler(drawArea2, self.redrawManager)
        self.builder.connect_signals({ "on_window_destroy" : self.on_window_destroy,
                                       "on_new_menuItem_activate" : self.on_new_menuitem_activate,
                                       "on_openWindow_delete_event" : self.on_openWindow_delete_event,
                                       "on_openWindowCloseButton_clicked" : self.on_openWindow_delete_event,
                                       "on_im1chooserbutton_file_set" : self.im1widgetHandler.on_file_set,
                                       "on_im2chooserbutton_file_set" : self.im2widgetHandler.on_file_set,
                                       "on_matchlistchooserbutton4_file_set" : self.on_matchlistchooserbutton4_file_set,
                                       "on_numbermatchesmenuitem_toggled" : self.on_numbermatchesmenuitem_toggled,
                                       "on_saveAsMenuItem_activate" : self.on_saveAsMenuItem_activate,
                                       "on_saveAsDialog_delete_event" : self.on_saveAsDialog_delete_event,
                                       "on_saveMenuItem_activate" : self.on_saveMenuItem_activate,
                                       "on_open_menuitem_activate" : self.on_open_menuitem_activate,
                                       "on_aboutMenuItem_activate" : self.on_aboutMenuItem_activate,
                                       "on_rowCol_radiobutton_toggled" : self.on_rowCol_radiobutton_toggled,
                                       "on_swapImages_menuitem_activate" : self.on_swapImages_menuitem_activate
                                       })
        self.window = self.builder.get_object("window")
        self.window.show()
        self.openWindow = self.builder.get_object("openWindow")
        self.rowColRadioButton = self.builder.get_object("rowCol_radiobutton")
        self.xyRadioButton = self.builder.get_object("xy_radiobutton")
        # default to row/col coordinate format
        self.rowColRadioButton.set_active(True)
        self.xyRadioButton.set_active(False)
        self.projFN = None
        if len(sys.argv) == 2 :
            # treat the single argument as a project file to open at startup
            self.projFN = sys.argv[1]
            if not os.path.isabs(self.projFN) :
                self.projFN = os.path.abspath(self.projFN)
            self.openProject()
if __name__ == "__main__":
    # Build the main window and hand control to the GTK main loop.
    viewer = MatchViewerApp()
    gtk.main()
| {
"content_hash": "b91c90c8f478de8b8444e578992b2bf8",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 120,
"avg_line_length": 39.828054298642535,
"alnum_prop": 0.6146330379459214,
"repo_name": "peterSW/MatchViewer",
"id": "4b0fc458b47242f9d052ab6a3ee844dbe45cf1c0",
"size": "9486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/MatchView.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23418"
}
],
"symlink_target": ""
} |
# Parent directories of converted output and reference XML samples for the
# front-matter (JOL) and body (Coaction) test corpora.
frontConvertParent = '/var/local/meTypesetTests/testingDocuments/jolOutput'
frontSampleParent = '/var/local/meTypesetTests/testingDocuments/jolXML'
bodyConvertParent = '/var/local/meTypesetTests/testingDocuments/coactionDocxOutput/'
bodySampleParent = '/var/local/meTypesetTests/testingDocuments/coactionXML'
# XPath expressions that must be matched in converted output for a test to pass.
necessaryMatchElems = ['body//sec/title', 'body//table-wrap//caption', 'body//list/list-item/p', 'front//email', 'front//journal-title', 'front//article-title', 'front//aff', 'back//fn', 'front//abstract//p' , 'body//fig//caption', 'front//name', 'front//institution']
levThreshold = 30  # maximum accepted Levenshtein distance for text comparison
acceptableFailureProportion = .25  # fraction of checks allowed to fail
acceptableFailurePercentage = '25%'  # human-readable form of the proportion above
| {
"content_hash": "dc86a6c448931049fcf0ee139baf097b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 268,
"avg_line_length": 83.25,
"alnum_prop": 0.7732732732732732,
"repo_name": "jnicolls/meTypeset-Test",
"id": "fea8b97469b70332813ec115053183a180082974",
"size": "956",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testvars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26761"
},
{
"name": "RobotFramework",
"bytes": "51164"
}
],
"symlink_target": ""
} |
import mechanicalsoup

# StatefulBrowser keeps cookies and the current page between calls.
browser = mechanicalsoup.StatefulBrowser()
browser.open("http://httpbin.org/")
print(browser.url)
# Follow the link whose text matches "forms".
browser.follow_link("forms")
print(browser.url)
print(browser.page)
# Select the form that posts to /post and fill in its fields by name.
browser.select_form('form[action="/post"]')
browser["custname"] = "Me"
browser["custtel"] = "00 00 0001"
browser["custemail"] = "nobody@example.com"
browser["size"] = "medium"
# The second "topping" assignment (a tuple) replaces the first single choice.
browser["topping"] = "onion"
browser["topping"] = ("bacon", "cheese")
browser["comments"] = "This pizza looks really good :-)"
# Uncomment to launch a real web browser on the current page.
# browser.launch_browser()
# Uncomment to display a summary of the filled-in form
# browser.form.print_summary()
response = browser.submit_selected()
print(response.text)
| {
"content_hash": "f6c9dfe5c0fde0d9b0d7027e7ab620a7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 61,
"avg_line_length": 27.037037037037038,
"alnum_prop": 0.7273972602739726,
"repo_name": "hemberger/MechanicalSoup",
"id": "f02beeb5a5f638fb62fb9e5e29555c420a539d45",
"size": "730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/expl_httpbin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102728"
}
],
"symlink_target": ""
} |
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
    http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import signal, time, struct, fcntl, termios, os, sys
# Test helper: simulate a dumb PAM that prints the password prompt first
# and only then sets ECHO False. What it should do is set ECHO False first
# and then print the prompt. Otherwise, if we see the password prompt and
# type our password real fast before it turns off ECHO, then some or all
# of our password might be visibly echoed back to us. Sounds unlikely?
# It happens.
print("fake password:")
sys.stdout.flush()
# Window during which a fast typist's keystrokes would still be echoed.
time.sleep(3)
# Disable terminal echo (lflags live at index 3 of the tcgetattr list).
attr = termios.tcgetattr(sys.stdout)
attr[3] = attr[3] & ~termios.ECHO
termios.tcsetattr(sys.stdout, termios.TCSANOW, attr)
time.sleep(12)
# Re-enable echo so the terminal is left in a usable state.
attr[3] = attr[3] | termios.ECHO
termios.tcsetattr(sys.stdout, termios.TCSANOW, attr)
time.sleep(2)
| {
"content_hash": "2f6b6cddef780cf17b1b32f056e0b58b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 76,
"avg_line_length": 42.921052631578945,
"alnum_prop": 0.7516860821581851,
"repo_name": "HPPTECH/hpp_IOSTressTest",
"id": "8ccf13f49ffc8e70b8d2d72faed95d8dd7c4207d",
"size": "1653",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Resources/ssh/pexpect-3.2/tests/echo_wait.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5571"
},
{
"name": "C",
"bytes": "5083"
},
{
"name": "CSS",
"bytes": "53608"
},
{
"name": "HTML",
"bytes": "2732176"
},
{
"name": "JavaScript",
"bytes": "945408"
},
{
"name": "Makefile",
"bytes": "5568"
},
{
"name": "Python",
"bytes": "5810318"
},
{
"name": "Shell",
"bytes": "21948"
}
],
"symlink_target": ""
} |
import os
import sys
from unittest import defaultTestLoader, TextTestRunner, TestSuite
# Module names that make up the default test suite; make_suite() prefixes
# each with a package path before loading.
TESTS = ('asyncflux_test', 'client_test', 'clusteradmin_test', 'database_test',
         'shardspace_test', 'user_test', 'util_test', )
def make_suite(prefix='', extra=(), force_all=False):
    """Build a TestSuite from the standard test modules plus *extra*.

    Args:
        prefix: package path prepended to every module name
            (e.g. ``'tests.'``).
        extra: additional module names to load alongside ``TESTS``.
        force_all: accepted for CLI compatibility; not used here.

    Returns:
        unittest.TestSuite with all named test modules loaded.
    """
    names = [prefix + module for module in TESTS + extra]
    suite = TestSuite()
    suite.addTest(defaultTestLoader.loadTestsFromNames(names))
    return suite
def additional_tests():
    """
    This is called automatically by setup.py test
    """
    # Prefix with the package name so the loader imports tests.<module>.
    return make_suite('tests.')
def main():
    """Run lint checks (pep8, pyflakes) and then the unit-test suite.

    Exits with a non-zero status as soon as any stage fails; otherwise
    exits with the test runner's success status.
    """
    my_dir = os.path.dirname(os.path.abspath(__file__))
    # Make the project root importable so 'asyncflux' resolves.
    sys.path.insert(0, os.path.abspath(os.path.join(my_dir, '..')))
    from optparse import OptionParser
    parser = OptionParser()
    # NOTE(review): store_true with default=True means these two flags can
    # never actually be switched off from the command line.
    parser.add_option('--with-pep8', action='store_true', dest='with_pep8',
                      default=True)
    parser.add_option('--with-pyflakes', action='store_true',
                      dest='with_pyflakes', default=True)
    parser.add_option('--force-all', action='store_true', dest='force_all',
                      default=False)
    parser.add_option('-v', '--verbose', action='count', dest='verbosity',
                      default=0)
    parser.add_option('-q', '--quiet', action='count', dest='quietness',
                      default=0)
    options, extra_args = parser.parse_args()
    has_pep8 = False
    try:
        import pep8
        has_pep8 = True
    except ImportError:
        if options.with_pep8:
            # Fixed: message previously lacked its trailing newline
            # (inconsistent with the pyflakes message below).
            sys.stderr.write('# Could not find pep8 library.\n')
            sys.exit(1)
    # Fixed: the checks previously ran whenever pep8 was importable,
    # ignoring the --with-pep8 option entirely.
    if has_pep8 and options.with_pep8:
        guide_main = pep8.StyleGuide(
            ignore=['E402'],
            paths=['asyncflux/'],
            exclude=[],
            max_line_length=80,
        )
        guide_tests = pep8.StyleGuide(
            ignore=['E221'],
            paths=['tests/'],
            max_line_length=80,
        )
        for guide in (guide_main, guide_tests):
            report = guide.check_files()
            if report.total_errors:
                sys.exit(1)
    if options.with_pyflakes:
        try:
            import pyflakes
            assert pyflakes  # silence pyflakes
        except ImportError:
            sys.stderr.write('# Could not find pyflakes library.\n')
            sys.exit(1)
        from pyflakes import api, reporter
        warnings = api.checkRecursive(['asyncflux', 'tests'],
                                      reporter._makeDefaultReporter())
        if warnings > 0:
            sys.exit(1)
    suite = make_suite('', tuple(extra_args), options.force_all)
    runner = TextTestRunner(verbosity=options.verbosity - options.quietness + 1)
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())
if __name__ == '__main__':
    main()
| {
"content_hash": "92cc1c7e83b07caca980cf12cad5ae54",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 80,
"avg_line_length": 32.01136363636363,
"alnum_prop": 0.5708200212992546,
"repo_name": "puentesarrin/asyncflux",
"id": "77a3da1c0dc15d0cc1228599708d9e6270edda55",
"size": "2841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/runtests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "99841"
}
],
"symlink_target": ""
} |
import pytest
from aiogear import PacketType, Worker
@pytest.fixture
def worker():
    """Provide a fresh Worker instance for each test."""
    return Worker()
@pytest.mark.parametrize('param', [10, '10'])
def test_can_do_timeout(worker, param):
    """CAN_DO_TIMEOUT must accept both int and str timeout values."""
    # The call itself is the assertion: it must not raise for either
    # parameter type.  (Removed the meaningless `assert 1 == 1`.)
    worker.send(PacketType.CAN_DO_TIMEOUT, 'func', param)
| {
"content_hash": "5e5ee4c4fae3fc29b5616b7802b957a8",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 57,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.6786885245901639,
"repo_name": "sardok/aiogear",
"id": "aedc4bc157f95dccfc7e93b75c17b40084f21656",
"size": "305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_worker_request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39626"
}
],
"symlink_target": ""
} |
"""Django settings for workbench project."""
import json
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DJFS = {'type': 'osfs',
'directory_root': 'workbench/static/djpyfs',
'url_root': '/static/djpyfs'}
DEBUG = True
if os.environ.get('EXCLUDE_SAMPLE_XBLOCKS') == 'yes':
EXCLUDED_XBLOCKS = {
'allscopes_demo',
'attempts_scoreboard_demo',
'equality_demo',
'filethumbs',
'helloworld_demo',
'html_demo',
'problem_demo',
'sidebar_demo',
'slider_demo',
'textinput_demo',
'thumbs',
'view_counter_demo',
}
else:
EXCLUDED_XBLOCKS = set()
# Template engine configuration: project templates plus the sample
# XBlocks' templates, with app template dirs enabled.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'workbench', 'templates'),
            os.path.join(BASE_DIR, 'sample_xblocks', 'basic', 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
            'debug': DEBUG,
        },
    },
]
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Database settings may be injected as a JSON blob from the environment
# (useful for CI); otherwise default to a local SQLite file.
if 'WORKBENCH_DATABASES' in os.environ:
    DATABASES = json.loads(os.environ['WORKBENCH_DATABASES'])
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'var/workbench.db'
        }
    }
# In-memory cache; fine for a single-process development workbench.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake'
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to the repository — acceptable only
# for a local development workbench, never reuse it in production.
SECRET_KEY = '5ftdd9(@p)tg&bqv$(^d!63psz9+g+_i5om_e%!32%po2_+%l7'
MIDDLEWARE = [
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'workbench.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'workbench.wsgi.application'
TEMPLATE_DIRS = []
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'djpyfs',
    'workbench',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
]
# Only use django-debug-toolbar if it has been installed.
# Installing django-debug-toolbar before running syncdb may cause a
# DatabaseError when trying to run syncdb.
try:
    import debug_toolbar # pylint: disable=unused-import
    INSTALLED_APPS += ('debug_toolbar',)
except ImportError:
    pass
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        # Rotating file log in var/ next to the SQLite database.
        'logfile': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'var/workbench.log',
            'maxBytes': 50000,
            'backupCount': 2,
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['logfile'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'django': {
            'level': 'DEBUG',
            'handlers': ['logfile'],
        }
    }
}
# Workbench-specific settings consumed by the workbench app itself:
# an env-controlled state-reset flag plus the XBlock runtime services.
WORKBENCH = {
    'reset_state_on_restart': (
        os.environ.get('WORKBENCH_RESET_STATE_ON_RESTART', "false").lower() == "true"
    ),
    'services': {
        'fs': 'xblock.reference.plugins.FSService',
        'settings': 'workbench.services.SettingsService',
    }
}
# Optional local overrides: a private.py module next to this file may
# replace any setting above; its absence is not an error.
try:
    from .private import * # pylint: disable=wildcard-import,import-error,useless-suppression
except ImportError:
    pass
| {
"content_hash": "906600489387629747bbcd490750c5c0",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 94,
"avg_line_length": 30.047210300429185,
"alnum_prop": 0.6459077274675047,
"repo_name": "stvstnfrd/xblock-sdk",
"id": "1842a56276650e6943be81339656c47d25c6ab12",
"size": "7001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workbench/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14419"
},
{
"name": "Dockerfile",
"bytes": "681"
},
{
"name": "HTML",
"bytes": "8020"
},
{
"name": "JavaScript",
"bytes": "237802"
},
{
"name": "Makefile",
"bytes": "2918"
},
{
"name": "Python",
"bytes": "146395"
}
],
"symlink_target": ""
} |
"""Test ref results for data with no coincidences."""
import numpy
import pytest
from pytest import approx
from ndd.estimators import AsymptoticNsb, Nsb, Plugin
from ndd.exceptions import NddError
# Paired sample sizes (N) and alphabet cardinalities (K); zipped together
# as the parameter sets for the `data` fixture below.
N = (10, 10)
K = (10, 1000)
@pytest.fixture(params=zip(N, K))
def data(request):
    """A sample of n singleton counts over an alphabet of k classes."""
    size, cardinality = request.param
    return {'nk': numpy.array([1] * size), 'k': cardinality}
def test_Nsb(data):
    """The Nsb estimate should be somewhat close to log(k)"""
    estimator = Nsb()
    # relative_error > 0 means the estimate undershoots log(k); require
    # the undershoot to stay below 20%.
    relative_error = 1 - estimator(**data) / numpy.log(data['k'])
    assert 0 < relative_error < 0.2
def test_Asymptotic(data):
    """Should raise an exception"""
    # NOTE(review): presumably the asymptotic estimator needs repeated
    # counts; the all-singleton sample is rejected with NddError.
    estimator = AsymptoticNsb()
    with pytest.raises(NddError):
        estimator(**data)
def test_Plugin(data):
    """Should be close to the log of #visited bins with frequency > 0"""
    estimator = Plugin(alpha=None)
    occupied_bins = (data['nk'] > 0).sum()
    assert estimator(**data) == approx(numpy.log(occupied_bins))
def test_Plugin_pseudo(data):
    """Should be close to log(cardinality)"""
    expected = numpy.log(data['k'])
    estimator = Plugin(alpha=1)
    assert estimator(**data) == approx(expected, rel=1.e-3)
| {
"content_hash": "b3003917d04e959c83604a3a97d2d91b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 72,
"avg_line_length": 25.953488372093023,
"alnum_prop": 0.6523297491039427,
"repo_name": "simomarsili/ndd",
"id": "b2221cde5f50f1eec59dd5eeb1a04ba5484d240a",
"size": "1215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_singletons.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "770"
},
{
"name": "Python",
"bytes": "87492"
}
],
"symlink_target": ""
} |
import pandas as pd
import logging as log
from ..utils.database import dbutils
def get_daily_call_counts(db_connection, timeseries_table):
    """
    Gets the time series data per customer from the database. This data
    contains every unique customer ID from the time series set along with
    the every unique call date for each customer with the associated number
    of calls made or received.

    The LAG() window functions compute, per row, whether the previous row
    belongs to the same customer and the day gap to the previous row.

    Args:
        db_connection (Psycopg.connection): The database connection
        timeseries_table (string): The name of the database table that contains
            the time series data.  NOTE(review): interpolated directly into
            the SQL string — only pass trusted, internal table names.
    Returns:
        Pandas.DataFrame: The time series data for each unique user. It has the
                          columns cust_id, date, date_diff, calls,
                          calls_in_florence, calls_near_airport
    """
    log.info('Start reading from DB')
    query = """
            SELECT cust_id,
                   (cust_id - LAG(cust_id) OVER ())=0 AS same_cust,
                   date_ AS date,
                   EXTRACT(DAYS FROM date_ - LAG(date_) OVER ()) - 1 AS date_diff,
                   calls,
                   calls_in_florence_city AS calls_in_florence,
                   calls_near_airport
            FROM %s
    """ % timeseries_table
    log.info('Finished reading from DB')
    return pd.read_sql(query, con=db_connection)
# TODO: cleanup or snip
def get_active_counts(counts):
    """
    Filters the daily call-count rows down to "active" customers.

    A row is kept when its customer appears on more than one distinct
    date, or when the row itself records more than 4 calls.

    Args:
        counts (Pandas.DataFrame): Daily counts with at least the columns
            cust_id, date and calls

    Returns:
        Pandas.DataFrame: The filtered rows, with an added days_active
            column holding the number of date rows per customer
    """
    counts_agg = counts.groupby('cust_id')['date'].count().reset_index(name='days_active')
    # Align the per-customer totals back onto each row by cust_id; the
    # previous positional assignment silently mis-aligned on the index.
    counts['days_active'] = counts['cust_id'].map(
        counts_agg.set_index('cust_id')['days_active'])
    print(counts['days_active'].unique())
    # Was `counts['calls'].nun`, which raised AttributeError at runtime.
    print(counts['calls'].nunique())
    counts_subset = counts[(counts['days_active'] > 1) | (counts['calls'] > 4)]
    return counts_subset
def get_italian_trips(db_connection):
    """
    Gets the time series data for all Italian visitors from the database

    Args:
        db_connection (Psycopg.connection): The database connection
    Returns:
        tuple(Pandas.DataFrame, Pandas.DataFrame): The annotated daily
            counts and the per-trip grouped sizes, as produced by
            get_trips (note: two values, not a single DataFrame)
    """
    counts = get_daily_call_counts(db_connection,
                                   'optourism.italians_timeseries_daily')
    return get_trips(counts)
def get_foreign_trips(db_connection):
    """
    Gets the time series data for all Foreign visitors from the database

    Args:
        db_connection (Psycopg.connection): The database connection
    Returns:
        tuple(Pandas.DataFrame, Pandas.DataFrame): The annotated daily
            counts and the per-trip grouped sizes, as produced by
            get_trips (note: two values, not a single DataFrame)
    """
    counts = get_daily_call_counts(db_connection,
                                   'optourism.foreigners_timeseries_daily')
    return get_trips(counts)
def frequency(dataframe, column_name):
    """
    Tabulates how often each distinct value of *column_name* occurs.

    Args:
        dataframe (Pandas.DataFrame): The data containing the column
        column_name (string): Name of the column to tabulate

    Returns:
        Pandas.DataFrame: One row per distinct value, sorted by value,
            with columns frequency, percentage, cumulative (CDF) and
            ccdf (1 - CDF)
    """
    table = dataframe[column_name].value_counts().to_frame()
    table.columns = ['frequency']
    table.index.name = column_name
    table.reset_index(inplace=True)
    table = table.sort_values(column_name)
    total = table['frequency'].sum()
    table['percentage'] = table['frequency'] / total
    table['cumulative'] = table['frequency'].cumsum() / total
    table['ccdf'] = 1 - table['cumulative']
    return table
def get_trips(counts, only_start=False, gap_length=3):
    """
    Labels each daily-count row with its position within a Florence trip.

    Rows are tagged in a new 'trip' column as 'first', 'last', 'start',
    'continue' or 'end', then consecutive on-trip rows are numbered into
    trip_id groups.

    Args:
        counts (Pandas.DataFrame): Output of get_daily_call_counts
        only_start (bool): When True, skip the 'last'/'end' labels
        gap_length (int): Maximum day gap for a trip to be 'continue'd

    Returns:
        tuple(Pandas.DataFrame, Pandas.DataFrame): The mutated counts
            frame and the per-(cust_id, trip_id) row counts
    """
    # Force the first row to start a new customer (column 1 is same_cust).
    counts.iloc[0, 1] = False
    same_cust_false = counts['same_cust'] == False
    same_cust_true = ~same_cust_false
    # A customer's first row has no meaningful gap to a previous row.
    counts.loc[same_cust_false, 'date_diff'] = None
    gap_threshold = counts['date_diff'] < gap_length
    counts['in_florence'] = (counts['calls_in_florence'] > 0) | \
                            (counts['calls_near_airport'] > 0)
    in_florence_true = counts['in_florence'] == True
    counts['calls_out_florence'] = counts['calls'] - counts['calls_in_florence']
    counts['out_florence'] = counts['calls_out_florence'] > 0
    # shift(1)/shift(-1) look at the previous/next day's row.
    counts['was_in_florence'] = counts['in_florence'].shift(1)
    counts['willbe_in_florence'] = counts['in_florence'].shift(-1)
    counts.loc[same_cust_false, 'was_in_florence'] = None
    was_in_florence_true = counts['was_in_florence'] == True
    was_in_florence_false = ~was_in_florence_true
    # NOTE(review): willbe_in_florence_true and the was/willbe_out_florence
    # columns below are computed but never used in this function.
    willbe_in_florence_true = counts['willbe_in_florence'] == True
    willbe_in_florence_false = ~willbe_in_florence_true
    counts['was_out_florence'] = counts['out_florence'].shift(1)
    counts['willbe_out_florence'] = counts['out_florence'].shift(-1)
    counts.loc[same_cust_false, 'was_out_florence'] = None
    counts['trip'] = ''
    # Do less specific first
    counts.loc[
        same_cust_false &
        in_florence_true,
        'trip'
    ] = 'first'
    if not only_start:
        counts.loc[
            same_cust_true &
            (counts['same_cust'].shift(-1) == False) &
            in_florence_true,
            'trip'
        ] = 'last'
    # And more specific next
    counts.loc[
        same_cust_true &
        gap_threshold &
        was_in_florence_true &
        in_florence_true,
        'trip'
    ] = 'continue'
    if not only_start:
        counts.loc[
            same_cust_true &
            gap_threshold &
            was_in_florence_true &
            in_florence_true &
            willbe_in_florence_false,
            'trip'
        ] = 'end'
    counts.loc[
        same_cust_true &
        gap_threshold &
        was_in_florence_false &
        in_florence_true,
        'trip'
    ] = 'start'
    counts['on_trip'] = counts['trip'] != ''
    trips = counts[['cust_id', 'same_cust', 'date', 'date_diff',
                    'calls_in_florence', 'calls_out_florence', 'trip',
                    'on_trip']]
    # Number each run of consecutive on_trip rows; off-trip rows get id 0.
    # NOTE(review): assigning into `trips` (a slice of counts) may emit a
    # pandas SettingWithCopyWarning — confirm intended.
    num = ((trips['on_trip'].shift(1) != trips['on_trip']).astype(int).cumsum())
    trips['trip_id'] = num * (trips['on_trip']).astype(int)
    trips_group = trips[trips['trip_id'] != 0][['cust_id', 'trip_id']]
    trips_group = trips_group.groupby(['cust_id', 'trip_id']).size().to_frame()
    return counts, trips_group
def get_length_gaps_between_trips(grouped_counts):
    """
    Gets the frequency of length of gaps between trips for a customer

    NOTE(review): this filters on a 'date_diff' column, which exists on
    the annotated counts frame (first value returned by get_trips), not
    on the grouped sizes frame — confirm which frame callers pass here.

    Args:
        grouped_counts (Pandas.DataFrame): The grouped dataframe returned from
                                           a get_trips method call
    Returns:
        Pandas.DataFrame: the dataframe containing the frequencies for each
                          gap (in days) between trips
    """
    return frequency(grouped_counts[grouped_counts['date_diff'] > 0],
                     'date_diff')
def get_trip_length(grouped_counts):
    """
    Gets the frequency of the length of a trip for a customer

    Args:
        grouped_counts (Pandas.DataFrame): The grouped dataframe returned from
                                           a get_trips method call
    Returns:
        Pandas.DataFrame: the dataframe containing the frequencies for each
                          trip length (in days)
    """
    # Column 0 is the unnamed size column produced by groupby(...).size().
    return frequency(grouped_counts, 0)
def get_number_trips(grouped_counts):
    """
    Gets the frequency of number of trips the customers make

    Args:
        grouped_counts (Pandas.DataFrame): The grouped dataframe returned from
                                           a get_trips method call
    Returns:
        Pandas.DataFrame: the dataframe containing the frequencies for each
                          number of trips
    """
    # Counting rows per cust_id yields the number of trips each customer made.
    return frequency(grouped_counts.groupby('cust_id').count(), 0)
def get_trip_length_for_onetime_visitors(grouped_counts):
    """
    Gets the frequency of trip lengths for customers who made exactly
    one trip.

    NOTE(review): the filter indexes with a boolean frame produced by a
    groupby on a differently-shaped frame — verify the row alignment is
    as intended.

    Args:
        grouped_counts (Pandas.DataFrame): The grouped dataframe returned from
                                           a get_trips method call
    Returns:
        Pandas.DataFrame: the dataframe containing the frequencies of the
                          one-time visitors' trip lengths (in days)
    """
    df = grouped_counts[grouped_counts.groupby('cust_id').count() == 1]
    return frequency(df, 0)
def main():
    """Entry point: print length-of-stay frequencies for one-time Italian
    and foreign visitors, using the project database connection."""
    connection = dbutils.connect()
    italian_trips, italian_grouped = get_italian_trips(connection)
    foreign_trips, foreign_grouped = get_foreign_trips(connection)
    italian_lengths = get_trip_length_for_onetime_visitors(italian_grouped)
    foreign_lengths = get_trip_length_for_onetime_visitors(foreign_grouped)
    # TODO: check that new line works in this print statement
    print('----- Length of stay for Italian visitors ----- \n')
    print(italian_lengths.head(10))
    print('----- Length of stay for Foreign visitors -----')
    print(foreign_lengths.head(10))
if __name__ == '__main__':
    main()
| {
"content_hash": "fcb5c056e9e3382a456ee0f9c6edb526",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 90,
"avg_line_length": 32.32867132867133,
"alnum_prop": 0.6015574302401038,
"repo_name": "DSSG2017/florence",
"id": "05b964241529d4aebb66252dbfdd7330056cf5bf",
"size": "9246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/features/trip_segmenter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "95882"
},
{
"name": "HTML",
"bytes": "2664888"
},
{
"name": "JavaScript",
"bytes": "88363"
},
{
"name": "Jupyter Notebook",
"bytes": "33347362"
},
{
"name": "PLSQL",
"bytes": "10885"
},
{
"name": "PLpgSQL",
"bytes": "1276"
},
{
"name": "Python",
"bytes": "112585"
},
{
"name": "SQLPL",
"bytes": "8077"
},
{
"name": "Shell",
"bytes": "8947"
}
],
"symlink_target": ""
} |
#pylint: disable-msg=C0301
#pylint: disable-msg=F0401
#pylint: disable-msg=W0142
"""Tests for application.py"""
import sys
import os
import unittest
import time
#import pprint
#import pdb
import warnings
from threading import Thread
import ctypes
import mock
import six
sys.path.append(".")
from pywinauto import Desktop
from pywinauto import win32defines
from pywinauto import application
from pywinauto.controls import hwndwrapper
from pywinauto.application import Application
from pywinauto.application import WindowSpecification
from pywinauto.application import process_module
from pywinauto.application import process_get_modules
from pywinauto.application import ProcessNotFoundError
from pywinauto.application import AppStartError
from pywinauto.application import AppNotConnected
from pywinauto.controls.common_controls import TrackbarWrapper
from pywinauto import findwindows
from pywinauto import findbestmatch
from pywinauto.timings import Timings
from pywinauto.timings import TimeoutError
from pywinauto.timings import WaitUntil
from pywinauto.timings import always_wait_until
from pywinauto.timings import always_wait_until_passes
from pywinauto.timings import timestamp # noqa: E402
from pywinauto.sysinfo import is_x64_Python
from pywinauto.sysinfo import is_x64_OS
from pywinauto.sysinfo import UIA_support
#application.set_timing(1, .01, 1, .01, .05, 0, 0, .1, 0, .01)
# About dialog may take some time to load
# so make sure that we wait for it.
# (module-wide override: affects every test in this file)
Timings.window_find_timeout = 5
def _notepad_exe():
    """Return the notepad.exe path matching the Python interpreter bitness."""
    use_system32 = is_x64_Python() or not is_x64_OS()
    return r"C:\Windows\System32\notepad.exe" if use_system32 \
        else r"C:\Windows\SysWOW64\notepad.exe"
# Folder with the prebuilt MFC sample binaries used as test targets; the
# 32-bit path is kept separately for the cross-bitness warning tests.
mfc_samples_folder_32 = mfc_samples_folder = os.path.join(
    os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
    mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
class ApplicationWarningTestCases(unittest.TestCase):
    """Unit tests for warnings in the application.Application class"""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        # Force Display User and Deprecation warnings every time
        # Python 3.3 + nose/unittest tries really hard to suppress them
        for warning in (UserWarning, PendingDeprecationWarning):
            warnings.simplefilter('always', warning)
        # Pick the sample app of our own bitness plus one of the opposite
        # bitness, for the 32/64-bit mismatch warning tests below.
        if is_x64_Python():
            self.sample_exe = os.path.join(mfc_samples_folder,
                                           "CmnCtrl1.exe")
            self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder_32,
                                                            "CmnCtrl1.exe")
        else:
            self.sample_exe = os.path.join(mfc_samples_folder_32, "CmnCtrl1.exe")
            self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder,
                                                            "x64",
                                                            "CmnCtrl1.exe")

    def testStartWarning3264(self):
        """start() must warn when app and Python bitness differ."""
        # Only meaningful on a 64-bit OS where both bitnesses exist.
        if not is_x64_OS():
            self.defaultTestResult()
            return
        warnings.filterwarnings('always', category=UserWarning, append=True)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            app = Application().start(self.sample_exe_inverted_bitness)
            app.kill()
        assert len(w) >= 1
        assert issubclass(w[-1].category, UserWarning)
        assert "64-bit" in str(w[-1].message)

    def testConnectWarning3264(self):
        """connect() must warn when connecting across bitness."""
        if not is_x64_OS():
            self.defaultTestResult()
            return
        app = Application().start(self.sample_exe_inverted_bitness)
        # Appveyor misteries...
        self.assertEqual(app.is_process_running(), True)
        with mock.patch("warnings.warn") as mockWarn:
            Application().connect(process=app.process)
            app.kill()
        args, kw = mockWarn.call_args
        assert len(args) == 2
        assert "64-bit" in args[0]
        assert args[1].__name__ == 'UserWarning'
class ApplicationWin32KillTestCases(unittest.TestCase):
    """Unit tests for method Application.kill() with backend='win32'"""

    # Subclasses override this to re-run the same tests on other backends.
    backend = 'win32'

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        self.sample_exe = os.path.join(mfc_samples_folder, 'RowList.exe')
        self.app = Application(backend=self.backend).start(self.sample_exe)
        self.target_process = self.app.process

    def tearDown(self):
        # Hard-kill in case a test left the app alive.
        self.app.kill(soft=False)

    def test_kill_hard(self):
        self.assertTrue(self.app.kill(soft=False))
        self.assertRaises(ProcessNotFoundError, Application().connect, process=self.target_process)

    def test_kill_soft(self):
        self.assertTrue(self.app.kill(soft=True))
        self.assertRaises(ProcessNotFoundError, Application().connect, process=self.target_process)

    def test_already_killed_hard(self):
        self.assertTrue(self.app.kill(soft=False))
        self.assertRaises(ProcessNotFoundError, Application().connect, process=self.target_process)
        self.assertTrue(self.app.kill(soft=False)) # already killed, returned True anyway

    def test_already_killed_soft(self):
        self.assertTrue(self.app.kill(soft=False))
        self.assertRaises(ProcessNotFoundError, Application().connect, process=self.target_process)
        self.assertTrue(self.app.kill(soft=True)) # already killed, returned True anyway

    def test_kill_soft_with_modal_subdialog(self):
        """Kill the app with modal subdialog to cover win.force_close() call"""
        self.app.RowListSampleApplication.menu_select('Help->About RowList...')
        # Window lookup differs per backend.
        if self.backend == 'win32':
            self.app.window(title='About RowList').wait('visible')
        elif self.backend == 'uia':
            self.app.RowListSampleApplication.child_window(title='About RowList').wait('visible')
        else:
            raise NotImplementedError('test_kill_soft_with_modal_subdialog: ' \
                'backend "{}" is not supported'.format(self.backend))
        self.assertTrue(self.app.kill(soft=True))
        self.assertRaises(ProcessNotFoundError, Application().connect, process=self.target_process)
        self.assertTrue(self.app.kill(soft=True)) # already killed, returned True anyway
# Only define the UIA variant when UI Automation support is available.
if UIA_support:
    class ApplicationUiaKillTestCases(ApplicationWin32KillTestCases):
        """Unit tests for method Application.kill() with backend='uia'"""
        # Overriding `backend` is enough: all inherited tests re-run on UIA.
        backend = 'uia'
        # the same test methods run here
# These cases only make sense when the test runner is NOT elevated
# (IsUserAnAdmin() == 0): the target app is started elevated while the
# automation code runs without admin rights.
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
    class AdminTestCases(ApplicationWarningTestCases):
        """Driving an elevated process from a non-elevated test runner."""

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            super(AdminTestCases, self).setUp()
            # Launch the sample app elevated via PowerShell's RunAs verb.
            cmd = 'powershell -Command "Start-Process {} -Verb RunAs"'.format(self.sample_exe)
            self.app = Application().start(cmd, wait_for_idle=False)

        def tearDown(self):
            """Close the application after tests"""
            self.app.kill()
            super(AdminTestCases, self).tearDown()

        def test_non_admin_warning(self):
            """Connecting to an elevated process must emit a rights warning."""
            warnings.filterwarnings('always', category=UserWarning, append=True)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.app = Application().connect(title="Common Controls Sample", timeout=20)
            assert len(w) >= 1
            assert issubclass(w[-1].category, UserWarning)
            assert "process has no rights" in str(w[-1].message)

        def test_non_admin_click(self):
            """Input actions against an elevated process must raise."""
            self.app = Application().connect(title="Common Controls Sample", timeout=20)
            with self.assertRaises(RuntimeError):
                self.app.CommonControlsSample.OK.click()
            with self.assertRaises(RuntimeError):
                self.app.CommonControlsSample.OK.click_input()
            with self.assertRaises(RuntimeError):
                self.app.CommonControlsSample.TVS_HASBUTTON.check()
class NonAdminTestCases(ApplicationWarningTestCases):
    """Runner and target both non-elevated: no warnings, actions succeed."""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        super(NonAdminTestCases, self).setUp()
        self.app = Application().start(self.sample_exe)

    def tearDown(self):
        """Close the application after tests"""
        self.app.kill()
        super(NonAdminTestCases, self).tearDown()

    def test_both_non_admin(self):
        """Connecting at equal rights must emit no warning."""
        warnings.filterwarnings('always', category=UserWarning, append=True)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.app = Application().connect(title="Common Controls Sample", timeout=5)
        assert len(w) == 0

    def test_both_non_admin_click(self):
        """Clicks and checks must succeed at equal rights."""
        self.app = Application().connect(title="Common Controls Sample", timeout=5)
        self.app.CommonControlsSample.TVS_HASBUTTON.check()
        self.assertEqual(self.app.CommonControlsSample.TVS_HASBUTTON.is_checked(), True)
        self.app.CommonControlsSample.OK.click()
        self.app.CommonControlsSample.wait_not('visible')
class ApplicationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        # Suppress all warnings for these tests; restored in tearDown.
        self.prev_warn = warnings.showwarning
        def no_warnings(*args, **kwargs): pass
        warnings.showwarning = no_warnings
        # Notepad lives in a different system dir depending on bitness.
        if is_x64_Python() or not is_x64_OS():
            self.notepad_subpath = r"system32\notepad.exe"
        else:
            self.notepad_subpath = r"SysWOW64\notepad.exe"
    def tearDown(self):
        """Close the application after tests"""
        #self.dlg.SendMessage(win32defines.WM_CLOSE)
        # Restore the warning hook replaced in setUp.
        warnings.showwarning = self.prev_warn
def test__init__(self):
"""Verify that Application instance is initialized or not"""
self.assertRaises(ValueError, Application, backend='unregistered')
    def test__iter__(self):
        """Verify that Application instance is not iterable"""
        app = Application()
        app.start(_notepad_exe())
        # Iterating an Application must raise NotImplementedError.
        with self.assertRaises(NotImplementedError):
            for a in app:
                pass
        app.kill()
    def test_not_connected(self):
        """Verify that it raises when the app is not connected"""
        # All window accessors require start()/connect() first.
        self.assertRaises (AppNotConnected, Application().__getattribute__, 'Hiya')
        self.assertRaises (AppNotConnected, Application().__getitem__, 'Hiya')
        self.assertRaises (AppNotConnected, Application().window_, title = 'Hiya')
        self.assertRaises (AppNotConnected, Application().top_window_,)
    def test_start_problem(self):
        """Verify start_ raises on unknown command"""
        self.assertRaises (AppStartError, Application().start, 'Hiya')
    def test_start(self):
        """test start() works correctly"""
        app = Application()
        # No process until start() is called.
        self.assertEqual(app.process, None)
        app.start(_notepad_exe())
        self.assertNotEqual(app.process, None)
        self.assertEqual(app.UntitledNotepad.process_id(), app.process)
        # The started process's module path must match the launched exe.
        notepadpath = os.path.join(os.environ['systemroot'], self.notepad_subpath)
        self.assertEqual(str(process_module(app.process)).lower(), str(notepadpath).lower())
        app.UntitledNotepad.menu_select("File->Exit")
def testStart_bug01(self):
"""On SourceForge forum AppStartError forgot to include %s for application name"""
app = Application()
self.assertEqual(app.process, None)
application.app_start_timeout = 1
app_name = r"I am not * and Application!/\.exe"
try:
app.start(app_name)
except AppStartError as e:
self.assertEqual(app_name in str(e), True)
# def testset_timing(self):
# """Test that set_timing sets the timing correctly"""
# prev_timing = (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# )
# set_timing(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
#
# self.assertEqual(
# (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# ), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) )
#
# set_timing(*prev_timing)
def test_connect_path(self):
"""Test that connect_() works with a path"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(path=self.notepad_subpath)
self.assertEqual(app1.process, app_conn.process)
app_conn = Application()
if is_x64_Python() or not is_x64_OS():
app_conn.connect(path=r"c:\windows\system32\notepad.exe")
else:
app_conn.connect(path=r"c:\windows\syswow64\notepad.exe")
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEqual('notepad.exe' in accessible_process_names, True)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_path_timeout(self):
"""Test that connect_() works with a path with timeout"""
app1 = Application()
def delayed_launch():
time.sleep(2)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
app_conn = Application()
app_conn.connect(path=_notepad_exe(), timeout=3)
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEqual('notepad.exe' in accessible_process_names, True)
app1.UntitledNotepad.menu_select('File->Exit')
def test_connect_path_timeout_problem(self):
"""Test that connect_() raise error when no process start"""
app1 = Application()
def delayed_launch():
time.sleep(1)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
self.assertRaises(ProcessNotFoundError, Application().connect, path=_notepad_exe(), timeout=0.5)
time.sleep(0.7)
app1.UntitledNotepad.menu_select('File->Exit')
def test_connect_process_timeout_failed(self):
"""Test that connect_(process=...) raise error when set timeout"""
app1 = Application()
app1.start(_notepad_exe())
self.assertRaises(ProcessNotFoundError, Application().connect, process=0, timeout=0.5)
app1.UntitledNotepad.menu_select('File->Exit')
# def test_Connect(self):
# """Test that connect_() works with a path"""
# app1 = Application()
# app1.start_("notepad.exe")
#
# app_conn = Application()
# app_conn.connect_(path = r"system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn = Application()
# app_conn.connect_(path = r"c:\windows\system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_process(self):
"""Test that connect_() works with a process"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(process=app1.process)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_handle(self):
"""Test that connect_() works with a handle"""
app1 = Application()
app1.start(_notepad_exe())
handle = app1.UntitledNotepad.handle
app_conn = Application()
app_conn.connect(handle=handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_windowspec(self):
"""Test that connect_() works with a windowspec"""
app1 = Application()
app1.start(_notepad_exe())
#unused var: handle = app1.UntitledNotepad.handle
app_conn = Application()
try:
app_conn.connect(title = "Untitled - Notepad")
except findwindows.WindowAmbiguousError:
wins = findwindows.find_elements(active_only = True, title = "Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
except findwindows.ElementNotFoundError:
WaitUntil(30, 0.5, lambda: len(findwindows.find_elements(active_only = True, title = "Untitled - Notepad")) > 0)
wins = findwindows.find_elements(active_only = True, title = "Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_raises(self):
"""Test that connect_() raises with invalid input"""
# try an argument that does not exist
self.assertRaises (
TypeError,
Application().connect, **{'not_arg': 23})
self.assertRaises (
RuntimeError,
Application().connect)
# try to pass an invalid process
self.assertRaises (
ProcessNotFoundError,
Application().connect, **{'process': 0})
# try to pass an invalid handle
self.assertRaises(
RuntimeError,
Application().connect, **{'handle' : 0})
# try to pass an invalid path
self.assertRaises(
ProcessNotFoundError,
Application().connect, **{'path': "no app here", 'timeout': 0.0})
def test_top_window(self):
"""Test that top_window_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.top_window_)
app.start(_notepad_exe())
self.assertEqual(app.UntitledNotepad.handle, app.top_window_().handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
self.assertEqual(app.AboutNotepad.handle, app.top_window_().handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
self.assertRaises(RuntimeError, app.top_window_)
def test_active_window(self):
"""Test that active_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.active_)
self.assertRaises(AppNotConnected, app.is64bit)
app.start(_notepad_exe())
app.UntitledNotepad.wait('ready')
self.assertEqual(app.active_().handle, app.UntitledNotepad.handle)
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
self.assertRaises(RuntimeError, app.active_)
def test_cpu_usage(self):
"""Verify that cpu_usage() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.cpu_usage)
app.start(_notepad_exe())
self.assertEqual(0.0 <= app.cpu_usage() <= 100.0, True)
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
def test_wait_cpu_usage_lower(self):
"""Test that wait_cpu_usage_lower() works correctly"""
if is_x64_Python() != is_x64_OS():
return None
Application().Start(r'explorer.exe')
def _cabinetwclass_exist():
"Verify if at least one active 'CabinetWClass' window is created"
l = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')
return (len(l) > 0)
WaitUntil(40, 0.5, _cabinetwclass_exist)
handle = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')[-1].handle
window = WindowSpecification({'handle': handle, 'backend': 'win32', })
explorer = Application().Connect(process = window.process_id())
try:
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
window.AddressBandRoot.ClickInput()
window.TypeKeys(r'Control Panel\Programs\Programs and Features', with_spaces=True, set_foreground=True)
window.TypeKeys(r'{ENTER}', set_foreground = False)
WaitUntil(40, 0.5, lambda: len(findwindows.find_elements(active_only = True,
title = 'Programs and Features',
class_name='CabinetWClass')) > 0)
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
installed_programs = window.FolderView.texts()[1:]
programs_list = ','.join(installed_programs)
if ('Microsoft' not in programs_list) and ('Python' not in programs_list):
hwndwrapper.ImageGrab.grab().save(r'explorer_screenshot.jpg')
hwndwrapper.ActionLogger().log('\ninstalled_programs:\n')
for prog in installed_programs:
hwndwrapper.ActionLogger().log(prog)
self.assertEqual(('Microsoft' in programs_list) or ('Python' in programs_list), True)
finally:
window.Close(2.0)
if UIA_support:
def test_wait_cpu_usage_lower_uia(self):
"""Test that wait_cpu_usage_lower() works correctly for UIA"""
app = Application(backend='uia')
app.start('notepad.exe')
try:
app.wait_cpu_usage_lower(threshold = 1.5, timeout = 30, usage_interval = 2)
finally:
app.kill()
app.cpu_usage = mock.Mock(return_value=10)
self.assertRaises(
RuntimeError, app.wait_cpu_usage_lower,
threshold = 9.0, timeout = 5, usage_interval = 0.5
)
# def test_wait_for_idle_exception(self):
# """Test that method start() raises an exception when wait for idle failed"""
# app = Application()
# self.assertRaises(Exception, app.start, 'cmd.exe')
# # TODO: test and fix the case when cmd.exe can't be killed by app.kill()
def test_windows(self):
"""Test that windows_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.windows_, **{'title' : 'not connected'})
app.start('notepad.exe')
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
notepad_handle = app.UntitledNotepad.handle
self.assertEqual(app.windows_(visible_only = True), [notepad_handle])
app.UntitledNotepad.menu_select("Help->About Notepad")
aboutnotepad_handle = app.AboutNotepad.handle
self.assertEqual(
app.windows_(visible_only = True, enabled_only = False),
[aboutnotepad_handle, notepad_handle])
app.AboutNotepad.OK.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_window(self):
"""Test that window_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.window_, **{'title' : 'not connected'})
app.start(_notepad_exe())
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
title = app.window_(title = "Untitled - Notepad")
title_re = app.window_(title_re = "Untitled[ -]+Notepad")
classname = app.window_(class_name = "Notepad")
classname_re = app.window_(class_name_re = "Not..ad")
handle = app.window_(handle = title.handle)
bestmatch = app.window_(best_match = "Untiotled Notepad")
self.assertNotEqual(title.handle, None)
self.assertNotEqual(title.handle, 0)
self.assertEqual(title.handle, title_re.handle)
self.assertEqual(title.handle, classname.handle)
self.assertEqual(title.handle, classname_re.handle)
self.assertEqual(title.handle, handle.handle)
self.assertEqual(title.handle, bestmatch.handle)
app.UntitledNotepad.menu_select("File->Exit")
def test_getitem(self):
"""Test that __getitem__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(Exception, app['blahblah'])
self.assertRaises(
findbestmatch.MatchError,
app['blahblah']['not here'].__getitem__, 'handle')
self.assertEqual(
app[u'Unt\xeftledNotepad'].handle,
app.window_(title = "Untitled - Notepad").handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
self.assertEqual(
app['AboutNotepad'].handle,
app.window_(title = "About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_getattribute(self):
"""Test that __getattribute__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(
findbestmatch.MatchError,
app.blahblah.__getattribute__, 'handle')
self.assertEqual(
app.UntitledNotepad.handle,
app.window_(title = "Untitled - Notepad").handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
# I think it's OK that this no longer raises a matcherror
# just because the window is not enabled - doesn't mean you
# should not be able to access it at all!
#self.assertRaises(findbestmatch.MatchError,
# app.Notepad.__getattribute__, 'handle')
self.assertEqual(
app.AboutNotepad.handle,
app.window(title = "About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_kill(self):
"""test killing the application"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.Edit.type_keys("hello")
app.UntitledNotepad.menu_select("File->Print...")
#app.Print.FindPrinter.Click() # Vasily: (Win7 x64) "Find Printer" dialog is from splwow64.exe process
#app.FindPrinters.Stop.Click()
app.kill()
self.assertRaises(AttributeError, app.UntitledNotepad.Edit)
def test_process_is_running(self):
"""Tests process is running and wait for exit function"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.wait("ready")
self.assertTrue(app.is_process_running())
self.assertRaises(TimeoutError, lambda: app.wait_for_process_exit(timeout=5, retry_interval=1))
app.kill()
app.wait_for_process_exit()
self.assertFalse(app.is_process_running())
def test_should_return_not_running_if_not_started(self):
"""Tests that works on new instance
is_process_running/wait_for_process_exit can be called on not started/disconnected instance
"""
app = Application()
app.wait_for_process_exit(timeout=10, retry_interval=1)
self.assertFalse(app.is_process_running())
class TestInheritedApp(Application):
"""Our inherited version of class"""
def test_method(self):
"""This method should be called without any issues"""
return self is not None
def test_application_inheritance(self):
"""Test that Application class can be inherited and has it's own methods"""
app = ApplicationTestCases.TestInheritedApp()
self.assertTrue(app.test_method())
def test_non_magic_application(self):
app = Application()
self.assertEqual(app.allow_magic_lookup, True)
app_no_magic = Application(allow_magic_lookup=False)
self.assertEqual(app_no_magic.allow_magic_lookup, False)
app_no_magic.start(_notepad_exe())
window = app_no_magic.window(best_match="UntitledNotepad")
dlg = window.child_window(best_match="Edit")
dlg.draw_outline()
with self.assertRaises(AttributeError):
app_no_magic.UntitledNotepad
with self.assertRaises(AttributeError):
window.Edit
app_no_magic.kill()
app_no_magic.wait_for_process_exit()
class WindowSpecificationTestCases(unittest.TestCase):

    """Unit tests for the application.WindowSpecification class"""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        self.app = Application().start("Notepad")
        # a dialog-level spec and a control-level spec used by most tests
        self.dlgspec = self.app.UntitledNotepad
        self.ctrlspec = self.app.UntitledNotepad.Edit

    def tearDown(self):
        """Close the application after tests"""
        # close the application
        #self.app.UntitledNotepad.menu_select("File->Exit")
        self.app.kill()

    def test__init__(self):
        """Test creating a new spec by hand"""
        wspec = WindowSpecification(
            dict(
                best_match=u"UntitledNotepad",
                app=self.app)
        )
        self.assertEqual(
            wspec.window_text(),
            u"Untitled - Notepad")

    def test__init__both_keywords(self):
        """Test creating a new spec with ambiguity by process and app simultaneously"""
        self.assertRaises(KeyError, WindowSpecification,
            dict(best_match=u"UntitledNotepad", app=self.app, process=self.app.process)
        )

    def test__call__(self):
        """Test that __call__() correctly raises an error"""
        self.assertRaises(AttributeError, self.dlgspec)
        self.assertRaises(AttributeError, self.ctrlspec)
        # no best_match!
        wspec = WindowSpecification(
            dict(title=u"blah", app=self.app)
        )
        self.assertRaises(AttributeError, wspec)

    def test_wrapper_object(self):
        """Test that we can get a control"""
        self.assertEqual(True, isinstance(self.dlgspec, WindowSpecification))
        self.assertEqual(
            True,
            isinstance(self.dlgspec.wrapper_object(), hwndwrapper.HwndWrapper)
        )

    def test_window(self):
        """test specifying a sub window of an existing specification"""
        sub_spec = self.dlgspec.child_window(class_name = "Edit")
        # window() is the legacy alias of child_window()
        sub_spec_legacy = self.dlgspec.window(class_name = "Edit")
        self.assertEqual(True, isinstance(sub_spec, WindowSpecification))
        self.assertEqual(sub_spec.class_name(), "Edit")
        self.assertEqual(sub_spec_legacy.class_name(), "Edit")

    def test__getitem__(self):
        """test item access of a windowspec"""
        self.assertEqual(
            True,
            isinstance(self.dlgspec['Edit'], WindowSpecification)
        )
        self.assertEqual(self.dlgspec['Edit'].class_name(), "Edit")
        # item access below a control-level spec is not allowed
        self.assertRaises(AttributeError, self.ctrlspec.__getitem__, 'edit')

    def test_getattr(self):
        """Test getting attributes works correctly"""
        self.assertEqual(
            True,
            isinstance(self.dlgspec.Edit, WindowSpecification)
        )
        self.assertEqual(self.dlgspec.Edit.class_name(), "Edit")
        # check that getting a dialog attribute works correctly
        self.assertEqual(
            "Notepad",
            self.dlgspec.class_name())
        # Check handling 'parent' as a WindowSpecification
        spec = self.ctrlspec.child_window(parent=self.dlgspec)
        self.assertEqual(spec.class_name(), "Edit")

    def test_non_magic_getattr(self):
        """Verify magic attribute lookup can be disabled on a WindowSpecification"""
        ws = WindowSpecification(dict(best_match="Notepad"))
        self.assertEqual(ws.allow_magic_lookup, True)
        ws_no_magic = WindowSpecification(dict(best_match="Notepad"), allow_magic_lookup=False)
        self.assertEqual(ws_no_magic.allow_magic_lookup, False)
        # explicit child_window() lookup still works
        dlg = ws_no_magic.child_window(best_match="Edit")
        has_focus = dlg.has_keyboard_focus()
        self.assertIn(has_focus, (True, False))
        # ...but attribute access must raise
        with self.assertRaises(AttributeError):
            ws_no_magic.Edit

    def test_exists(self):
        """Check that windows exist"""
        self.assertEqual(True, self.dlgspec.exists())
        self.assertEqual(True, self.dlgspec.exists(0))
        self.assertEqual(True, self.ctrlspec.exists())
        # TODO: test a control that is not visible but exists
        #self.assertEqual(True, self.app.DefaultIME.exists())
        # a missing window with a short timeout returns quickly
        start = timestamp()
        self.assertEqual(False, self.app.BlahBlah.exists(timeout=.1))
        self.assertEqual(True, timestamp() - start < .3)
        # a missing window with a long timeout keeps retrying for ~timeout
        start = timestamp()
        self.assertEqual(False, self.app.BlahBlah.exists(timeout=3))
        self.assertEqual(True, 2.7 < timestamp() - start < 3.3)

    def test_exists_timing(self):
        """test the timing of the exists method"""
        # try ones that should be found immediately
        start = timestamp()
        self.assertEqual(True, self.dlgspec.exists())
        self.assertEqual(True, timestamp() - start < .3)
        start = timestamp()
        self.assertEqual(True, self.ctrlspec.exists())
        self.assertEqual(True, timestamp() - start < .3)
        # try one that should not be found
        start = timestamp()
        self.assertEqual(True, self.dlgspec.exists(.5))
        timedif = timestamp() - start
        # NOTE(review): the chained comparison below reduces to 'timedif < .49'
        # (the '< .6' part is redundant) - possibly a typo for '0 <= timedif < .6';
        # confirm the intended range before changing
        self.assertEqual(True, .49 > timedif < .6)

    def test_wait(self):
        """test the functionality and timing of the wait method"""
        allowable_error = .2
        # criteria strings are case-insensitive and whitespace-tolerant
        start = timestamp()
        self.assertEqual(self.dlgspec.wrapper_object(), self.dlgspec.wait("enaBleD "))
        time_taken = (timestamp() - start)
        if not 0 <= time_taken < (0 + 2 * allowable_error):
            # assertEqual against a known-wrong value produces a readable
            # failure message that includes the actual time taken
            self.assertEqual(.02, time_taken)
        start = timestamp()
        self.assertEqual(self.dlgspec.wrapper_object(), self.dlgspec.wait(" ready"))
        self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
        start = timestamp()
        self.assertEqual(self.dlgspec.wrapper_object(), self.dlgspec.wait(" exiSTS"))
        self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
        start = timestamp()
        self.assertEqual(self.dlgspec.wrapper_object(), self.dlgspec.wait(" VISIBLE "))
        self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
        start = timestamp()
        self.assertEqual(self.dlgspec.wrapper_object(), self.dlgspec.wait(" ready enabled"))
        self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
        start = timestamp()
        self.assertEqual(self.dlgspec.wrapper_object(), self.dlgspec.wait("visible exists "))
        self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
        start = timestamp()
        self.assertEqual(self.dlgspec.wrapper_object(), self.dlgspec.wait("exists "))
        self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
        start = timestamp()
        self.assertEqual(self.dlgspec.wrapper_object(), self.dlgspec.wait("actIve "))
        self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
        # an unknown criterion is a SyntaxError
        self.assertRaises(SyntaxError, self.dlgspec.Wait, "Invalid_criteria")

    def test_wait_non_existing(self):
        """test timing of the wait method for non-existing element"""
        allowable_error = .2
        start = timestamp()
        self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'exists')
        # the wait must take approximately the configured find timeout
        expected = Timings.window_find_timeout
        self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)

    def test_wait_invisible(self):
        """test timing of the wait method for non-existing element and existing invisible one"""
        # TODO: re-use an MFC sample for this test
        allowable_error = .2
        start = timestamp()
        self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'visible')
        expected = Timings.window_find_timeout
        self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
        # make sure Status Bar is not visible
        status_bar_menu = self.app.UntitledNotepad.menu().item('&View').sub_menu().item('&Status Bar')
        if status_bar_menu.is_checked():
            status_bar_menu.select()
        # check that existing invisible control is still found with 'exists' criterion
        status_bar_spec = self.app.UntitledNotepad.child_window(class_name="msctls_statusbar32", visible_only=False)
        self.assertEqual('StatusBar', status_bar_spec.wait('exists').friendly_class_name())
        # 'visible' must time out regardless of criteria order
        start = timestamp()
        self.assertRaises(TimeoutError, status_bar_spec.wait, 'exists visible')
        self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
        start = timestamp()
        self.assertRaises(TimeoutError, status_bar_spec.wait, 'visible exists')
        self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)

    def test_wait_not(self):
        """
        Test that wait not fails for all the following
        * raises and error when criteria not met
        * timing is close to the timeout value
        """
        allowable_error = .16
        start = timestamp()
        self.assertRaises(TimeoutError, self.dlgspec.wait_not, "enaBleD ", .1, .05)
        taken = timestamp() - start
        # NOTE(review): this chained comparison reduces to
        # 'taken > .1 + allowable_error'; confirm a range check wasn't intended
        if .1 < (taken) > .1 + allowable_error:
            self.assertEqual(.12, taken)
        start = timestamp()
        self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready", .1, .05)
        self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
        start = timestamp()
        self.assertRaises(TimeoutError, self.dlgspec.wait_not, " exiSTS", .1, .05)
        self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
        start = timestamp()
        self.assertRaises(TimeoutError, self.dlgspec.wait_not, " VISIBLE ", .1, .05)
        self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
        start = timestamp()
        self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready enabled", .1, .05)
        self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
        start = timestamp()
        self.assertRaises(TimeoutError, self.dlgspec.wait_not, "visible exists ", .1, .05)
        self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
        start = timestamp()
        self.assertRaises(TimeoutError, self.dlgspec.wait_not, "exists ", .1, .05)
        self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
        start = timestamp()
        self.assertRaises(TimeoutError, self.dlgspec.wait_not, "actIve ", .1, .05)
        self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
        self.assertRaises(SyntaxError, self.dlgspec.wait_not, "Invalid_criteria")

    # NOTE(review): ~100 lines of long-dead commented-out Wait*/WaitNot* timing
    # tests were removed here.

    def test_depth(self):
        """Test that descendants() with depth works correctly"""
        self.dlgspec.menu_select("Format -> Font")
        # a deeper traversal must find more descendants than a shallow one
        self.assertNotEqual(
            len(self.app['Font'].descendants(depth=1)),
            len(self.app['Font'].descendants(depth=2)))

    def test_print_control_identifiers(self):
        """Make sure print_control_identifiers() doesn't crash"""
        self.dlgspec.print_control_identifiers()
        self.ctrlspec.print_control_identifiers()

    def test_print_control_identifiers_file_output(self):
        """Make sure print_control_identifiers() creates correct file"""
        output_filename = "test_print_control_identifiers.txt"
        self.dlgspec.print_ctrl_ids(filename=output_filename)
        if os.path.isfile(output_filename):
            with open(output_filename, "r") as test_log_file:
                content = str(test_log_file.readlines())
            self.assertTrue("'Untitled - NotepadEdit'" in content
                            and "'Edit'" in content)
            self.assertTrue("child_window(class_name=\"msctls_statusbar32\"" in content)
            os.remove(output_filename)
        else:
            self.fail("print_control_identifiers can't create a file")
        # dump_tree() is the alias writing to the same kind of file
        self.ctrlspec.dump_tree(filename=output_filename)
        if os.path.isfile(output_filename):
            with open(output_filename, "r") as test_log_file:
                content = str(test_log_file.readlines())
            self.assertTrue("child_window(class_name=\"Edit\")" in content)
            os.remove(output_filename)
        else:
            self.fail("print_control_identifiers can't create a file")

    def test_find_elements_re(self):
        """Test for bug #90: A crash in 'find_elements' when called with 'title_re' argument"""
        self.dlgspec.wait('visible')
        windows = findwindows.find_elements(title_re = "Untitled - Notepad")
        self.assertTrue(len(windows) >= 1)
class WaitUntilDecoratorTests(unittest.TestCase):

    """Unit tests for always_wait_until and always_wait_until_passes decorators"""

    def test_always_wait_until_decorator_success(self):
        """A decorated callable returning a truthy value succeeds right away"""
        @always_wait_until(4, 2)
        def truthy_check():
            return True

        self.assertTrue(truthy_check())

    def test_always_wait_until_decorator_failure(self):
        """A decorated callable that never returns a truthy value times out"""
        @always_wait_until(4, 2)
        def falsy_check():
            return False

        with self.assertRaises(TimeoutError):
            falsy_check()

    def test_always_wait_until_passes_decorator_success(self):
        """A decorated callable that does not raise succeeds right away"""
        @always_wait_until_passes(4, 2)
        def quiet_check():
            return True

        self.assertTrue(quiet_check())

    def test_always_wait_until_passes_decorator_failure(self):
        """A decorated callable that keeps raising eventually times out"""
        @always_wait_until_passes(4, 2)
        def raising_check():
            raise Exception("Unexpected Error in foo")

        with self.assertRaises(TimeoutError):
            raising_check()
class MultiLevelWindowSpecificationTests(unittest.TestCase):

    """Unit tests for multi-level (3+) WindowSpecification objects"""

    # The fixture and the test are chosen at class-creation time: the UIA
    # variant drives RowList.exe, the Win32 variant drives CmnCtrl3.exe.
    if UIA_support:
        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.slow()
            self.app = Application(backend='uia').start(os.path.join(mfc_samples_folder, u"RowList.exe"))
            self.dlg = self.app.RowListSampleApplication

        def tearDown(self):
            """Close the application after tests"""
            self.dlg.CloseButton.click()
            self.dlg.wait_not('visible')

        def test_3level_specification(self):
            """Test that controls can be accessed by 3 levels of attributes"""
            self.dlg.Toolbar.About.click()
            self.dlg.AboutRowList.OK.click()
            #self.dlg.AboutRowList.wait_not('visible') # XXX: it takes more than 50 seconds!
    else: # Win32
        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.defaults()
            self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
            self.dlg = self.app.CommonControlsSample

        def tearDown(self):
            """Close the application after tests"""
            self.dlg.SendMessage(win32defines.WM_CLOSE)

        def test_4level_specification(self):
            """Test that controls can be accessed by 4 levels of attributes"""
            self.assertEqual(self.dlg.CPagerCtrl.Pager.Toolbar.button_count(), 12)
if UIA_support:
    class DesktopUiaWindowSpecificationTests(unittest.TestCase):

        """Unit tests for Desktop(backend='uia') object"""

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.slow()
            # open an Explorer window on the MFC samples folder
            self.app = Application().start('explorer.exe "' + mfc_samples_folder_32 + '"')
            self.desktop = Desktop(backend='uia')
            self.desktop_no_magic = Desktop(backend='uia', allow_magic_lookup=False)

        def tearDown(self):
            """Close the application after tests"""
            self.desktop.MFC_samplesDialog.close()
            self.desktop.MFC_samplesDialog.wait_not('exists')

        def test_folder_list(self):
            """Test that ListViewWrapper returns correct files list in explorer.exe"""
            files_list = self.desktop.MFC_samplesDialog.Shell_Folder_View.Items_View.wrapper_object()
            self.assertEqual([item.window_text() for item in files_list.get_items()],
                             [u'x64', u'BCDialogMenu.exe', u'CmnCtrl1.exe', u'CmnCtrl2.exe', u'CmnCtrl3.exe',
                              u'CtrlTest.exe', u'mfc100u.dll', u'RebarTest.exe', u'RowList.exe', u'TrayMenu.exe'])
            self.assertEqual(files_list.item('RebarTest.exe').window_text(), 'RebarTest.exe')

        def test_set_backend_to_window_uia(self):
            """Set backend to method window(), except exception ValueError"""
            # the backend is fixed at Desktop creation time; overriding it raises
            with self.assertRaises(ValueError):
                self.desktop.window(backend='uia', title='MFC_samplesDialog')
            with self.assertRaises(ValueError):
                self.desktop.window(backend='win32', title='MFC_samplesDialog')

        def test_get_list_of_windows_uia(self):
            """Test that method .windows() returns a non-empty list of windows"""
            dlgs = self.desktop.windows()
            self.assertTrue(len(dlgs) > 1)

        def test_set_backend_to_windows_uia(self):
            """Set backend to method windows, except exception ValueError"""
            with self.assertRaises(ValueError):
                self.desktop.windows(backend='win32')
            with self.assertRaises(ValueError):
                self.desktop.windows(backend='uia')

        def test_only_visible_windows_uia(self):
            """Set visible_only to the method windows"""
            dlgs = self.desktop.windows(visible_only=True)
            self.assertTrue(all([win.is_visible() for win in dlgs]))

        def test_only_enable_windows_uia(self):
            """Set enable_only to the method windows"""
            dlgs = self.desktop.windows(enabled_only=True)
            self.assertTrue(all([win.is_enabled() for win in dlgs]))

        def test_non_magic_desktop(self):
            """Verify magic attribute lookup can be disabled on a Desktop object"""
            from pywinauto.controls.uiawrapper import UIAWrapper
            self.assertEqual(self.desktop.allow_magic_lookup, True)
            self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
            dlgs = self.desktop_no_magic.windows()
            self.assertTrue(len(dlgs) > 1)
            # the no-magic flag propagates to specs created explicitly
            window = self.desktop_no_magic.window(title="MFC_samples")
            self.assertEqual(window.allow_magic_lookup, False)
            dlg = window.child_window(class_name="ShellTabWindowClass").wrapper_object()
            self.assertIsInstance(dlg, UIAWrapper)
            has_focus = dlg.has_keyboard_focus()
            self.assertIn(has_focus, (True, False))
            # attribute access must raise when magic lookup is disabled
            with self.assertRaises(AttributeError):
                self.desktop_no_magic.MFC_samples
            with self.assertRaises(AttributeError):
                window.ShellTabWindowClass
class DesktopWin32WindowSpecificationTests(unittest.TestCase):
    """Unit tests for Desktop(backend='win32') object"""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
        self.desktop = Desktop(backend='win32')
        # Second Desktop with magic attribute lookup disabled for the
        # allow_magic_lookup tests below.
        self.desktop_no_magic = Desktop(backend='win32', allow_magic_lookup=False)
        self.window_title = 'Common Controls Sample'

    def tearDown(self):
        """Close the application after tests"""
        self.desktop.window(title=self.window_title, process=self.app.process).SendMessage(win32defines.WM_CLOSE)

    def test_simple_access_through_desktop(self):
        """Test that controls can be accessed by 4 levels of attributes"""
        dlg = self.desktop.window(title=self.window_title, process=self.app.process)
        self.assertEqual(dlg.Pager.Toolbar.button_count(), 12)

    def test_set_backend_to_window_win32(self):
        """Set backend to method window(), except exception ValueError"""
        # Passing any backend to Desktop.window() is rejected.
        with self.assertRaises(ValueError):
            self.desktop.window(backend='uia', title=self.window_title, process=self.app.process)
        with self.assertRaises(ValueError):
            self.desktop.window(backend='win32', title=self.window_title, process=self.app.process)

    def test_get_list_of_windows_win32(self):
        """Test that method .windows() returns a non-empty list of windows"""
        dlgs = self.desktop.windows()
        self.assertTrue(len(dlgs) > 1)
        window_titles = [win_obj.window_text() for win_obj in dlgs]
        self.assertTrue(self.window_title in window_titles)

    def test_set_backend_to_windows_win32(self):
        """Set backend to method windows, except exception ValueError"""
        with self.assertRaises(ValueError):
            self.desktop.windows(backend='win32')
        with self.assertRaises(ValueError):
            self.desktop.windows(backend='uia')

    def test_only_visible_windows_win32(self):
        """Set visible_only to the method windows"""
        dlgs = self.desktop.windows(visible_only=True)
        self.assertTrue(all([win.is_visible() for win in dlgs]))

    def test_only_enable_windows_win32(self):
        """Set enable_only to the method windows"""
        dlgs = self.desktop.windows(enabled_only=True)
        self.assertTrue(all([win.is_enabled() for win in dlgs]))

    def test_from_point_win32(self):
        """Test method Desktop(backend='win32').from_point(x, y)"""
        combo = self.app.Common_Controls_Sample.ComboBox.wrapper_object()
        x, y = combo.rectangle().mid_point()
        combo_from_point = self.desktop.from_point(x, y)
        self.assertEqual(combo, combo_from_point)

    def test_top_from_point_win32(self):
        """Test method Desktop(backend='win32').top_from_point(x, y)"""
        combo = self.app.Common_Controls_Sample.ComboBox.wrapper_object()
        dlg = self.app.Common_Controls_Sample.wrapper_object()
        x, y = combo.rectangle().mid_point()
        # top_from_point resolves to the top-level dialog, not the combo box.
        dlg_from_point = self.desktop.top_from_point(x, y)
        self.assertEqual(dlg, dlg_from_point)

    def test_non_magic_desktop(self):
        """Verify explicit lookup still works when magic lookup is off"""
        self.assertEqual(self.desktop.allow_magic_lookup, True)
        self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
        window = self.desktop_no_magic.window(title=self.window_title, process=self.app.process)
        self.assertEqual(window.allow_magic_lookup, False)
        dlg = window.child_window(class_name="msctls_trackbar32").wrapper_object()
        self.assertIsInstance(dlg, TrackbarWrapper)
        pos = dlg.get_position()
        self.assertIsInstance(pos, six.integer_types)
        # Attribute-style (magic) access must raise when disabled.
        with self.assertRaises(AttributeError):
            getattr(self.desktop_no_magic, self.window_title.replace(" ", "_"))
        with self.assertRaises(AttributeError):
            window.msctls_trackbar32
if __name__ == "__main__":
    # Run all test cases in this module with unittest's CLI runner.
    unittest.main()
| {
"content_hash": "1693b474d64a8f02dd3e4e48a5b033fc",
"timestamp": "",
"source": "github",
"line_count": 1416,
"max_line_length": 124,
"avg_line_length": 41.0225988700565,
"alnum_prop": 0.6116753890648671,
"repo_name": "airelil/pywinauto",
"id": "1cf48c88e94a26c51b98948221dfd43f08de72b9",
"size": "59835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywinauto/unittests/test_application.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1554"
},
{
"name": "PowerShell",
"bytes": "5771"
},
{
"name": "Python",
"bytes": "1922138"
},
{
"name": "XSLT",
"bytes": "3485"
}
],
"symlink_target": ""
} |
import random
from argparse import Namespace
from contextlib import contextmanager
from pathlib import Path
import pytest
from vedro import Scenario
from vedro.core import ArgumentParser, ConfigType, Dispatcher, VirtualScenario
from vedro.events import ArgParsedEvent, ArgParseEvent, ConfigLoadedEvent
from vedro.plugins.orderer import Orderer, OrdererPlugin
@pytest.fixture()
def dispatcher() -> Dispatcher:
    """Provide a fresh Dispatcher instance per test."""
    return Dispatcher()
@pytest.fixture()
def orderer(dispatcher: Dispatcher) -> OrdererPlugin:
    """Provide an OrdererPlugin already subscribed to the test dispatcher."""
    orderer = OrdererPlugin(Orderer)
    orderer.subscribe(dispatcher)
    return orderer
@contextmanager
def seeded(seed: str):
    """Seed the global ``random`` module for the duration of the block.

    Saves the current RNG state on entry and restores it on exit, even when
    the body raises (the original version skipped restoration on error and
    pointlessly re-seeded inside the ``yield`` expression).

    Args:
        seed: Seed value passed to ``random.seed``.
    """
    state = random.getstate()
    random.seed(seed)
    try:
        yield
    finally:
        random.setstate(state)
def make_vscenario(path: str) -> VirtualScenario:
    """Build a step-less VirtualScenario whose class reports *path* as its file."""
    resolved = Path(path).absolute()

    class _StubScenario(Scenario):
        __file__ = resolved

    return VirtualScenario(_StubScenario, steps=[])
async def fire_config_loaded_event(dispatcher: Dispatcher, config: ConfigType) -> None:
    """Fire a ConfigLoadedEvent carrying *config* on *dispatcher*."""
    await dispatcher.fire(ConfigLoadedEvent(Path(), config))
async def fire_arg_parse_event(dispatcher: Dispatcher) -> None:
    """Fire an ArgParseEvent with a fresh ArgumentParser on *dispatcher*."""
    await dispatcher.fire(ArgParseEvent(ArgumentParser()))
def make_arg_parsed_event(*, order_stable: bool = False,
                          order_reversed: bool = False,
                          order_random: bool = False) -> ArgParsedEvent:
    """Build an ArgParsedEvent with the given orderer flag values."""
    namespace = Namespace(
        order_stable=order_stable,
        order_reversed=order_reversed,
        order_random=order_random,
    )
    return ArgParsedEvent(namespace)
| {
"content_hash": "565fd5972dbff363ebb8ba817bc7635b",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 87,
"avg_line_length": 28.719298245614034,
"alnum_prop": 0.7116676847892486,
"repo_name": "nikitanovosibirsk/vedro",
"id": "fe5b281fcda76242905349db2352e333f91f6fb9",
"size": "1637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/orderer/_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1384"
},
{
"name": "Python",
"bytes": "416588"
}
],
"symlink_target": ""
} |
"""Convert the AmberTools GAFF force field from the active conda environment
into an OpenMM-compatible ``gaff.xml`` file."""
import os
import shutil
import tempfile

import parmed

# Get the paths to the gaff.dat and leaprc files; fail early with a clear
# message if no conda environment is active (os.getenv would return None and
# os.path.join would raise a cryptic TypeError).
conda_prefix = os.getenv("CONDA_PREFIX")
if conda_prefix is None:
    raise RuntimeError(
        "CONDA_PREFIX is not set; activate a conda environment with AmberTools.")
leaprc_path = os.path.join(conda_prefix, 'dat', 'leap', 'cmd', 'leaprc.gaff')
gaffdat_path = os.path.join(conda_prefix, 'dat', 'leap', 'parm', 'gaff.dat')

# Make a temporary directory (otherwise we won't be able to find the gaff.dat).
# try/finally guarantees we return to the original directory and remove the
# temporary one even if parameter conversion fails.
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
    os.chdir(tmpdir)
    shutil.copy(gaffdat_path, os.getcwd())
    # Instantiate the amber parameter set:
    amber_params = parmed.amber.AmberParameterSet.from_leaprc(leaprc_path)
    # Make an OpenMM parameter set:
    openmm_params = parmed.openmm.OpenMMParameterSet.from_parameterset(
        amber_params, remediate_residues=False)
finally:
    os.chdir(cwd)
    shutil.rmtree(tmpdir)

# Save the OpenMM parameter set as gaff.xml:
openmm_params.write("gaff.xml", write_unused=True, improper_dihedrals_ordering='amber')
| {
"content_hash": "328a7aad55886766f7e258e60046d236",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 106,
"avg_line_length": 34.77777777777778,
"alnum_prop": 0.7539936102236422,
"repo_name": "choderalab/perses",
"id": "ecf98c5fb91c2474a41ff97437e64496e9e3039c",
"size": "939",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "perses/data/generate_gaff_xml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "196409"
},
{
"name": "Python",
"bytes": "2048898"
},
{
"name": "Shell",
"bytes": "960"
},
{
"name": "TeX",
"bytes": "147531"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class SiteKind:
    """Supported code-hosting site types; ``values`` feeds model field choices."""

    github = "github"
    values = [(github, github)]
class RepositoryKind:
    """Supported VCS types; ``values`` feeds model field choices."""

    git = "git"
    hg = "hg"
    values = [(git, git), (hg, hg)]
class WikiKind:
    """Supported wiki back-ends; ``values`` feeds model field choices."""

    github = "github"
    values = [(github, github)]
class IssueTrackerKind:
    """Supported issue trackers; ``values`` feeds model field choices."""

    github = "github"
    values = [(github, github)]
class UserProfile(models.Model):
    """Extra per-user data attached 1:1 to Django's built-in User."""

    user = models.OneToOneField(User, related_name='profile')
    tag_line = models.CharField(max_length=256, blank=True)
    about = models.TextField(blank=True)
class UserLink(models.Model):
    """An external URL shown on a user's profile."""

    user = models.ForeignKey(User, related_name='links')
    name = models.CharField(max_length=256)
    # NOTE(review): verify_exists only exists in old Django versions; this
    # argument will break on a framework upgrade -- confirm target version.
    url = models.URLField(verify_exists=False)
def create_user_profile(**kwargs):
    """post_save hook: give every newly created User an empty UserProfile."""
    if not kwargs['created']:
        return
    UserProfile.objects.create(user=kwargs['instance'])


post_save.connect(create_user_profile, sender=User)
class UserSite(models.Model):
    """A user's account (login) on an external code-hosting site."""

    user = models.ForeignKey(User)
    login = models.CharField(max_length=200)
    concrete_type = models.CharField(max_length=16, choices=SiteKind.values)

    def __str__(self):
        return "%s: %s@%s" % (self.user, self.login, self.concrete_type)
class Project(models.Model):
    """A portfolio project owned by a user."""

    user = models.ForeignKey(User, related_name='projects')
    name = models.CharField(max_length=200)
    tag_line = models.CharField(max_length=200)
    description = models.TextField()

    def __str__(self):
        return "%s@%s" % (self.user, self.name)
class Repository(models.Model):
    """A source repository (git/hg), shareable by several projects."""

    projects = models.ManyToManyField(Project, through='ProjectRepository', related_name='repositories')
    url = models.CharField(max_length=200)
    concrete_type = models.CharField(max_length=16, choices=RepositoryKind.values)
    last_updated = models.DateTimeField()

    class Meta:
        verbose_name_plural = "repositories"
        # One row per (VCS type, URL) pair.
        unique_together = [('concrete_type', 'url')]

    def save(self, *args, **kwargs):
        """
        Enforce a default last_updated timestamp sometime in the faraway past,
        makes it much more simple for us to get a prioritized list of repositories
        for processing on the back-ends.
        """
        if self.last_updated is None:
            # Unix epoch sorts before any real update time.
            self.last_updated = datetime.fromtimestamp(0)
        return super(Repository, self).save(*args, **kwargs)

    def __str__(self):
        return "%s(%s)" % (self.url, self.concrete_type)
class ProjectRepository(models.Model):
    """Through-model linking a Project to a Repository with the user's login there."""

    project = models.ForeignKey(Project)
    repository = models.ForeignKey(Repository)
    login = models.CharField(max_length=200)

    # NOTE(review): looks like a typo for "login_exists", and it is a stub that
    # always returns False -- confirm the intent before renaming/implementing.
    def login_exits(self):
        return False

    def activity_count(self):
        # All recorded activity on this repository, regardless of author.
        return RepositoryActivity.objects.filter(repository=self.repository).count()

    def login_activity_count(self):
        # Activity on this repository authored by this login only.
        return RepositoryActivity.objects.filter(repository=self.repository).filter(login=self.login).count()

    class Meta:
        verbose_name_plural = "project repositories"

    def __str__(self):
        return "%s@%s" % (self.login, self.repository)
class Wiki(models.Model):
    """A project wiki hosted at an external URL."""

    projects = models.ManyToManyField(Project, through='ProjectWiki')
    url = models.CharField(max_length=200)
    concrete_type = models.CharField(max_length=16, choices=WikiKind.values)

    def __str__(self):
        return "%s(%s)" % (self.url, self.concrete_type)
class ProjectWiki(models.Model):
    """Through-model linking a Project to a Wiki with the user's login there."""

    project = models.ForeignKey(Project)
    wiki = models.ForeignKey(Wiki)
    login = models.CharField(max_length=200)

    def __str__(self):
        return "%s@%s" % (self.login, self.wiki)
class IssueTracker(models.Model):
    """An issue tracker hosted at an external URL."""

    projects = models.ManyToManyField(Project, through='ProjectIssueTracker')
    url = models.CharField(max_length=200)
    concrete_type = models.CharField(max_length=16, choices=IssueTrackerKind.values)

    def __str__(self):
        return "%s(%s)" % (self.url, self.concrete_type)
class ProjectIssueTracker(models.Model):
    """Through-model linking a Project to an IssueTracker with the user's login."""

    project = models.ForeignKey(Project)
    issue_tracker = models.ForeignKey(IssueTracker)
    login = models.CharField(max_length=200)

    def __str__(self):
        return "%s@%s" % (self.login, self.issue_tracker)
class Activity(models.Model):
    """Abstract base for a dated activity record attributed to a login."""

    date = models.DateTimeField()
    login = models.CharField(max_length=200)

    class Meta:
        abstract = True
class RepositoryActivity(Activity):
    """An activity record (e.g. a commit) on a Repository."""

    repository = models.ForeignKey(Repository, related_name='activities')

    class Meta:
        verbose_name_plural = "repository activities"
        get_latest_by = 'date'

    def __str__(self):
        return "%s@%s(%s)" % (self.login, self.repository, self.date)
class IssueTrackerActivity(Activity):
    """An activity record on an IssueTracker."""

    issue_tracker = models.ForeignKey(IssueTracker)

    class Meta:
        verbose_name_plural = "issue tracker activities"

    def __str__(self):
        return "%s@%s(%s)" % (self.login, self.issue_tracker, self.date)
class WikiActivity(Activity):
    """An activity record on a Wiki."""

    wiki = models.ForeignKey(Wiki)

    class Meta:
        verbose_name_plural = "wiki activities"

    def __str__(self):
        return "%s@%s(%s)" % (self.login, self.wiki, self.date)
| {
"content_hash": "a6140aecf05815d4e459427a646229c5",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 109,
"avg_line_length": 29.74576271186441,
"alnum_prop": 0.6653371320037986,
"repo_name": "sema/django-2012",
"id": "bdff8acf020108e85065d38ffd1403ff26d12e8a",
"size": "5265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/mosaicportfolio/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "65594"
},
{
"name": "Python",
"bytes": "78297"
}
],
"symlink_target": ""
} |
__all__ = ['cntv_download', 'cntv_download_by_id']
from common import *
import json
import re
def cntv_download_by_id(id, title=None, output_dir='.', merge=True):
    """Fetch video metadata for a CNTV pid and download every chapter."""
    assert id
    api_url = 'http://vdn.apps.cntv.cn/api/getHttpVideoInfo.do?pid=' + id
    info = json.loads(get_html(api_url).decode('utf-8'))
    title = title or info['title']
    video = info['video']
    # Sanity-check the known layouts: 'chapters' alone, or with 'chapters2'.
    alternatives = [key for key in video.keys() if key.startswith('chapters')]
    assert alternatives in (['chapters'], ['chapters', 'chapters2']), alternatives
    if 'chapters2' in video:
        chapters = video['chapters2']
    else:
        chapters = video['chapters']
    urls = [chapter['url'] for chapter in chapters]
    # Derive the container format from the first chapter's file extension.
    ext = r1(r'\.([^.]+)$', urls[0])
    assert ext in ('flv', 'mp4')
    download_urls(urls, title, str(ext), total_size=None, merge=merge)
def cntv_download(url, merge=True):
    """Resolve a CNTV page URL to a video id and download it."""
    if re.match(r'http://\w+\.cntv\.cn/(\w+/\w+/classpage/video/)?\d+/\d+\.shtml', url):
        # Regular CNTV pages embed the id between repaste comment markers.
        page = get_html(url)
        id = r1(r'<!--repaste.video.code.begin-->(\w+)<!--repaste.video.code.end-->', page)
    elif re.match(r'http://xiyou.cntv.cn/v-[\w-]+\.html', url):
        # xiyou URLs carry the id directly in the path.
        id = r1(r'http://xiyou.cntv.cn/v-([\w-]+)\.html', url)
    else:
        raise NotImplementedError(url)
    cntv_download_by_id(id, merge=merge)
# Module-level aliases consumed by the downloader framework's dispatch table.
download = cntv_download
download_playlist = playlist_not_supported('cntv')
def main():
    """CLI entry point: delegate to the shared script driver."""
    script_main('cntv', cntv_download)


if __name__ == '__main__':
    main()
| {
"content_hash": "6f86938cf2e65dc4030027bd7d252598",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 103,
"avg_line_length": 35.37837837837838,
"alnum_prop": 0.6516424751718869,
"repo_name": "rahimnathwani/youku-lixian",
"id": "0ca3f961a01b53d56321e06f0cfd930aafc393c4",
"size": "1332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cntv.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Tests for Intel MPI benchmark."""
import unittest
from unittest import mock
from absl.testing import flagsaver
from absl.testing import parameterized
from perfkitbenchmarker import os_types
from perfkitbenchmarker.linux_packages import imb
from perfkitbenchmarker.linux_packages import intelmpi
# Required for --mpi_vendor flag.
from perfkitbenchmarker.linux_packages import mpi # pylint: disable=unused-import
from tests import pkb_common_test_case
def MockVm():
  """Return a mock VM exposing the attributes the imb/intelmpi code reads."""
  vm_attrs = dict(
      internal_ip='1.2.3.4',
      NumCpusForBenchmark=8,
      BASE_OS_TYPE=os_types.RHEL,
  )
  return mock.Mock(**vm_attrs)
class IntelMpiLibTestCase(pkb_common_test_case.PkbCommonTestCase):
  """Tests for the imb package with --mpi_vendor=intel."""

  # Canonical mpivars.sh path returned by the mocked VM.
  MPIVARS_FILE = ('/opt/intel/compilers_and_libraries/'
                  'linux/mpi/intel64/bin/mpivars.sh')
  # Expected compile command for Intel MPI 2019-era installs.
  COMPILE_2019 = ('cd mpi-benchmarks; '
                  '. /opt/intel/mkl/bin/mklvars.sh intel64; '
                  '. /opt/intel/compilers_and_libraries/'
                  'linux/bin/compilervars.sh intel64; '
                  'CC=mpicc CXX=mpicxx make')
  # Expected compile command for oneAPI (2021+) installs.
  COMPILE_2021 = ('cd mpi-benchmarks; '
                  '. /opt/intel/oneapi/setvars.sh; '
                  'CC=mpicc CXX=mpicxx make')

  def setUp(self):
    super().setUp()
    self.enter_context(flagsaver.flagsaver(mpi_vendor='intel'))

  def MockVmWithReturnValues(self):
    # for use when calling intelmpi.py commands to find mpivars, MPI version
    vm = MockVm()
    vm_returns = [
        self.MPIVARS_FILE,
        ('Intel(R) MPI Library for Linux* OS, '
         'Version 2018 Update 4 Build 20180823 (id: 18555)')
    ]
    vm.RemoteCommand.side_effect = [(txt, '') for txt in vm_returns]
    return vm

  def testInstallCompileSource(self) -> None:
    """Install clones the benchmark source and applies the local patch."""
    vm = MockVm()
    imb.Install(vm)
    # TODO(user) taken out due to not installing MKL
    # vm.InstallPackages.assert_called_with('intel-mkl-2020.1-102')
    # just confirm that the git clone and patch were done
    cmd = ';'.join([cmd[0][0] for cmd in vm.RemoteCommand.call_args_list])
    self.assertRegex(
        cmd, 'git clone -n https://github.com/intel/mpi-benchmarks.git',
        'Missing git clone command')
    self.assertRegex(cmd, 'patch -d mpi-benchmarks -p3 < ~/intelmpi.patch',
                     'Missing patch command')

  def testMpirunMpiVersion(self):
    """MpirunMpiVersion parses 'Version 2018 Update 4' into '2018.4'."""
    vm = self.MockVmWithReturnValues()
    mpi_version = intelmpi.MpirunMpiVersion(vm)
    self.assertEqual('2018.4', mpi_version)
    vm.RemoteCommand.assert_called_with(f'. {self.MPIVARS_FILE}; mpirun -V')

  def testMpirunMpiVersionError(self):
    """Unparsable 'mpirun -V' output raises ValueError."""
    vm = MockVm()
    vm.RemoteCommand.return_value = 'Non parsable text', ''
    with self.assertRaises(ValueError):
      intelmpi.MpirunMpiVersion(vm)

  @parameterized.parameters((2, ' -ppn 1'), (4, ''))
  def testPpn(self, total_processes, expected_suffix):
    """MpiRunCommand sources mpivars and sets -ppn for single-process hosts."""
    vm = self.MockVmWithReturnValues()
    hosts = ['10.0.0.1', '10.0.0.2']
    mpirun = imb.MpiRunCommand(vm, hosts, total_processes, 0, [], [], False)
    # '-ppn 1' is only seen when running single threaded tests
    expected_mpirun = (f'mpirun -n {total_processes} -hosts 10.0.0.1,10.0.0.2'
                       f'{expected_suffix}')
    self.assertEqual(f'. {self.MPIVARS_FILE}; {expected_mpirun}', mpirun)

  @parameterized.parameters(
      ('2019.6', COMPILE_2019, []),
      ('2021.2', COMPILE_2021,
       ['intel-oneapi-compiler-dpcpp-cpp', 'intel-oneapi-mpi-devel']))
  def testInstall2021(self, intelmpi_version, expected_compile_cmd,
                      installed_packages):
    """The compile command and extra packages depend on the Intel MPI version."""
    vm = MockVm()
    with flagsaver.flagsaver(intelmpi_version=intelmpi_version):
      imb.Install(vm)
    vm.RemoteCommand.assert_any_call(expected_compile_cmd)
    vm.InstallPackages.assert_has_calls(
        [mock.call(pkb) for pkb in installed_packages])
class OpenMpiLibTestCase(pkb_common_test_case.PkbCommonTestCase):
  """Tests for the imb package with --mpi_vendor=openmpi."""

  def setUp(self):
    super().setUp()
    self.enter_context(flagsaver.flagsaver(mpi_vendor='openmpi'))

  def testInstallCompileSource(self) -> None:
    """Install clones the benchmark source and applies the local patch."""
    vm = MockVm()
    imb.Install(vm)
    cmd = ';'.join([cmd[0][0] for cmd in vm.RemoteCommand.call_args_list])
    self.assertRegex(
        cmd, 'git clone -n https://github.com/intel/mpi-benchmarks.git',
        'Missing git clone command')
    self.assertRegex(cmd, 'patch -d mpi-benchmarks -p3 < ~/intelmpi.patch',
                     'Missing patch command')

  @flagsaver.flagsaver(imb_compile_from_source=False)
  def testInstallWithoutImbCompileFromSourceThrows(self) -> None:
    """OpenMPI requires compiling from source; Install rejects the combination."""
    vm = MockVm()
    with self.assertRaises(ValueError) as e:
      imb.Install(vm)
    self.assertEqual(
        str(e.exception),
        '--mpi_vendor=openmpi requires --imb_compile_from_source')

  def testMpiRunCommandEnvVarsExported(self):
    """Environment variables are prefixed and exported via -x flags."""
    vm = MockVm()
    total_proc = 2
    ppn = 1
    hosts = ['10.0.0.1', '10.0.0.2']
    environment = [
        'OMPI_MCA_btl=self,tcp',
        'OMPI_MCA_rmaps_base_mapping_policy=node:PE=1',
    ]
    mpirun = imb.MpiRunCommand(vm, hosts, total_proc, ppn, environment, [],
                               False)
    expected_mpirun = (
        'OMPI_MCA_btl=self,tcp OMPI_MCA_rmaps_base_mapping_policy=node:PE=1 '
        'mpirun -x OMPI_MCA_btl -x OMPI_MCA_rmaps_base_mapping_policy '
        '-report-bindings -display-map -n 2 -npernode 1 --use-hwthread-cpus '
        '-host 10.0.0.1:slots=2,10.0.0.2:slots=2')
    self.assertEqual(expected_mpirun, mpirun)

  def testMpiRunCommandNoEnvVarsIsFormattedCorrectly(self):
    """No environment variables means no leading assignments or -x flags."""
    vm = MockVm()
    total_proc = 2
    ppn = 1
    hosts = ['10.0.0.1', '10.0.0.2']
    environment = []
    mpirun = imb.MpiRunCommand(vm, hosts, total_proc, ppn, environment, [],
                               False)
    expected_mpirun = (
        'mpirun -report-bindings -display-map -n 2 -npernode 1 '
        '--use-hwthread-cpus -host 10.0.0.1:slots=2,10.0.0.2:slots=2')
    self.assertEqual(expected_mpirun, mpirun)

  def testMpiRunCommandNoPpnSpecified(self):
    """ppn=0 lets MpiRunCommand derive -npernode from processes/hosts."""
    vm = MockVm()
    total_proc = 8
    ppn = 0
    hosts = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4']
    environment = []
    mpirun = imb.MpiRunCommand(vm, hosts, total_proc, ppn, environment, [],
                               False)
    expected_mpirun = (
        'mpirun -report-bindings -display-map -n 8 -npernode 2 '
        '--use-hwthread-cpus -host '
        '10.0.0.1:slots=8,10.0.0.2:slots=8,10.0.0.3:slots=8,10.0.0.4:slots=8')
    self.assertEqual(expected_mpirun, mpirun)


if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "a2d592a2fb93e3aca237ab08c716139b",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 82,
"avg_line_length": 35.357142857142854,
"alnum_prop": 0.6397824397824398,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "bba2e38021dbc03054efd22624525e56cf661bdb",
"size": "6435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/linux_packages/imb_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
} |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py, used to resolve README.md reliably.
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='DictLearner',
    description='Framework for sparse dictionary learning and several example models.',
    # NOTE(review): no version/packages/etc. are declared; setuptools will fall
    # back to defaults -- confirm this minimal metadata is intentional.
    long_description=long_description
)
"""
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'ecog=bin:main',
],
},
""" | {
"content_hash": "4af6e618f63568e4a0ecf2262710dd6f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 91,
"avg_line_length": 34.06896551724138,
"alnum_prop": 0.7135627530364372,
"repo_name": "emdodds/DictLearner",
"id": "9f6e9eb69f555356470fdac81a9d487ed013a2ec",
"size": "988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139896"
},
{
"name": "Shell",
"bytes": "375"
}
],
"symlink_target": ""
} |
from sqlalchemy.ext.declarative import declarative_base
from flask_sqlalchemy import SQLAlchemy
# Module-wide SQLAlchemy handles shared across the application.
db = SQLAlchemy()
db_session = db.session

# Separate declarative base (instead of db.Model) for the model classes.
Base = declarative_base()


def init_db(app):
    """Bind SQLAlchemy to *app* and create all tables declared on Base."""
    db.init_app(app)
    with app.app_context():
        Base.metadata.create_all(bind=db.engine)
    # Expose Model.query on declarative models, Flask-SQLAlchemy style.
    # NOTE(review): indentation was lost in this dump; this statement is placed
    # inside init_db but outside the app-context block -- confirm against repo.
    Base.query = db_session.query_property()
| {
"content_hash": "ddf0ece243166bac87c384937f595699",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 55,
"avg_line_length": 23.5,
"alnum_prop": 0.723404255319149,
"repo_name": "roxel/planner",
"id": "57e2fdbe97f22d642b54596febc14d7d66646521",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1126"
},
{
"name": "HTML",
"bytes": "8983"
},
{
"name": "JavaScript",
"bytes": "107"
},
{
"name": "Makefile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "8492"
}
],
"symlink_target": ""
} |
# NOTE(review): this is a Python 2 script (raw_input, py2 map->array idiom);
# it will not run unmodified under Python 3.
import __future__
import sys
import numpy

# Redirect stdin to the sample input file so raw_input() reads from it.
sys.stdin = open("./challenge_sample_input", 'r')
print("===" * 30)
print("SAMPLE OUTPUT:")
print("===" * 30)
print(open("./challenge_sample_output", 'r').read())
print("===" * 30)
print("START")
print("===" * 30)
# Read two whitespace-separated integer vectors, one per line.
A = numpy.array(map(int, raw_input().split()))
B = numpy.array(map(int, raw_input().split()))
# HackerRank "inner and outer": print the inner then the outer product.
print(numpy.inner(A, B))
print(numpy.outer(A, B))
| {
"content_hash": "06cba48f2fac2b8cae7e9782202e9d4f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 52,
"avg_line_length": 24.8125,
"alnum_prop": 0.6272040302267002,
"repo_name": "shollingsworth/HackerRank",
"id": "10743ba4d83bb2658c7c57a8884354b6c2c33491",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/np-inner-and-outer/main.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "123702"
},
{
"name": "Ruby",
"bytes": "3848"
},
{
"name": "Shell",
"bytes": "1178"
}
],
"symlink_target": ""
} |
r"""Simple Reinforcement Learning test environment.
Kinematic point mass environment similar to the one from the MAML paper.
"""
import gym
from gym.utils import seeding
import matplotlib.pyplot as plt
import numpy as np
class MovePointEnv(gym.Env):
  """Simple point mass gym environment.

  The goal is to move an agent (point) from a start location to a goal
  position. Each time step, the agent can move in any direction. In addition,
  each action to the point could be rotated by a fixed angle.

  The agent is limited to a [-2, 2] range in X and Y dimensions.

  Args:
    start_pos: Starting position of the point.
    end_pos: Ending position of the point.
    goal_reached_distance: The episode terminates early if the agent is within
      this distance to end_pos.
    trial_length: Maximum length of the episode.
    action_rotation: The degree (in radians) to rotate the action by, used to
      test NoRML.
    sparse_reward: If true, the reward is -1 until the episode terminates,
      otherwise the reward is the negative distance to end_pos.
  """

  def __init__(self,
               start_pos,
               end_pos,
               goal_reached_distance=0.1,
               trial_length=100,
               action_rotation=0.,
               sparse_reward=False):
    self._start_pos = np.array(start_pos).reshape((-1, 2))
    self._current_pos = self._start_pos
    self._end_pos = np.array(end_pos).reshape((-1, 2))
    self._action_rotation = action_rotation
    self._sparse_reward = sparse_reward
    # Positions must lie within the [-2, 2] workspace.
    if np.abs(self._start_pos).max() > 2:
      raise ValueError('Start position out of bounds.')
    if np.abs(self._end_pos).max() > 2:
      raise ValueError('End position out of bounds.')
    self._positions_log = [self._start_pos]
    self._goal_reached_distance = goal_reached_distance
    self._trial_length = trial_length
    self._step = 0
    self.action_space = gym.spaces.Box(
        -np.ones(2), np.ones(2), dtype=np.float32)
    self.observation_space = gym.spaces.Box(
        np.ones(2) * -2, np.ones(2) * 2, dtype=np.float32)

  def reset(self):
    """Reset the agent to the start position and clear the trajectory log."""
    self._current_pos = self._start_pos
    self._positions_log = [self._start_pos]
    self._step = 0
    return self._get_observation()

  def seed(self, seed=None):
    """Seed the environment's RNG (gym API)."""
    self.np_random, seed = seeding.np_random(seed)
    return [seed]

  def step(self, action):
    """Step forward the simulation, given the action.

    Args:
      action: displacement vector.

    Returns:
      observations: The new position of the robot after the action.
      reward: The reward for the current state-action pair (negative distance to
        goal, or -1 if sparse_reward).
      done: Whether the episode has ended.
      info: A dictionary that stores diagnostic information.
    """
    self._step += 1
    # Rotate the requested displacement by the fixed perturbation angle.
    rot_matrix = np.array(
        [[np.cos(self._action_rotation), -np.sin(self._action_rotation)],
         [np.sin(self._action_rotation),
          np.cos(self._action_rotation)]])
    new_pos = self._current_pos + rot_matrix.dot(action)
    # Clip to the [-2, 2] workspace.
    new_pos = np.clip(new_pos,
                      np.ones(new_pos.shape) * -2,
                      np.ones(new_pos.shape) * 2)
    distance = np.sqrt(np.sum((new_pos - self._end_pos)**2))
    reward = -1. if self._sparse_reward else -distance
    self._current_pos = new_pos
    self._positions_log.append(new_pos)
    # Terminate when close enough to the goal or out of time.
    done = (distance < self._goal_reached_distance) or (self._step >=
                                                        self._trial_length)
    return self._get_observation(), reward, done, {}

  def _get_observation(self):
    # Return a copy so callers cannot mutate internal state.
    return np.copy(self._current_pos).reshape((-1, 2))

  def render(self, mode='rgb_array', margin=0.1, limits=((-2, 2), (-2, 2))):
    """Render the trajectory so far as an RGB image array."""
    if mode != 'rgb_array':
      raise ValueError('Only rgb_array is supported.')
    fig = plt.figure()
    pos = np.vstack(self._positions_log)
    plt.plot(pos[:, 0], pos[:, 1], 'b.-')
    plt.plot(self._start_pos[:, 0], self._start_pos[:, 1], 'r+')
    plt.plot(self._end_pos[:, 0], self._end_pos[:, 1], 'g+')
    plt.xlim(limits[0])
    plt.ylim(limits[1])
    plt.gca().set_aspect('equal', adjustable='box')
    fig.canvas.draw()
    # np.fromstring on binary data is deprecated; frombuffer is the supported
    # equivalent. Copy to keep the returned array writable as before.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data
| {
"content_hash": "0e870c79f5db24df86be7b564989c762",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 80,
"avg_line_length": 35.47107438016529,
"alnum_prop": 0.6258154706430569,
"repo_name": "google-research/google-research",
"id": "721acd0d88ebefe3e6074abaf61b6f1e4d294479",
"size": "4900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "norml/envs/move_point_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import numpy as np
from .vad import vad
def _get_edges(vact):
edges = np.flatnonzero(np.diff(vact.astype(int)))
edges = edges + 1
if vact[0]:
edges = np.hstack((0, edges))
if vact[-1]:
edges = np.hstack((edges, vact.size))
edges = np.minimum(edges, vact.size).reshape(-1, 2)
edges = edges[(edges[:, 1] - edges[:, 0]) > 0]
return edges
def _rms(arr):
return np.sqrt((arr ** 2.0).mean())
def _drop_silence(waveform, edges, threshold_db):
    """Keep only the (start, end) segments whose RMS level is >= *threshold_db* dB."""
    levels = 20 * np.log10([_rms(waveform[s:e]) for s, e in edges])
    return edges[levels >= threshold_db]
def _merge_short_silence(edges, max_samples):
    """Merge adjacent segments separated by a gap shorter than *max_samples*."""
    if len(edges) == 0:
        return edges
    merged = [list(edges[0])]
    for start, end in edges[1:]:
        previous = merged[-1]
        if start - previous[1] < max_samples:
            # Gap is too short to count as silence: extend the previous segment.
            previous[1] = end
        else:
            merged.append([start, end])
    return np.asarray(merged)
def trim(
    data, fs, fs_vad=16000, hop_length=30, vad_mode=0, threshold_db=-35.0, min_dur=0.2
):
    """
    Trim leading and trailing silence from a speech waveform using VAD.
    Parameters
    ----------
    data : ndarray
        numpy array of mono (1 ch) speech data.
        1-d or 2-d; if 2-d, shape must be (1, time_length) or (time_length, 1).
        If data type is int, -32768 < data < 32767.
        If data type is float, -1 < data < 1.
    fs : int
        Sampling frequency of data.
    fs_vad : int, optional
        Sampling frequency for webrtcvad.
        fs_vad must be 8000, 16000, 32000 or 48000.
        Default is 16000.
    hop_length : int, optional
        Step size [milliseconds].
        hop_length must be 10, 20, or 30.
        Default is 30.
    vad_mode : int, optional
        Set vad aggressiveness.
        As vad_mode increases, it becomes more aggressive.
        vad_mode must be 0, 1, 2 or 3.
        Default is 0.
    threshold_db : float, optional
        The threshold level (in dB) below reference to consider as silence.
        Default is -35.0.
    min_dur : float, optional
        The minimum duration (in seconds) of each speech segment.
        Default is 0.2.
    Returns
    -------
    (start_index, end_index) : int
        The trimmed waveform is data[start_index:end_index].
        If voice activity can't be detected, returns (0, 0).
    """
    activity = vad(data, fs, fs_vad, hop_length, vad_mode)
    segments = _get_edges(activity)
    # Bridge silences shorter than 100 ms, then drop short and quiet segments.
    segments = _merge_short_silence(segments, fs * 0.1)
    segments = segments[(segments[:, 1] - segments[:, 0]) > fs * min_dur]
    segments = _drop_silence(data, segments, threshold_db)
    flat = segments.ravel()
    if flat.any():
        return flat[0], flat[-1]
    return 0, 0
def split(
    data, fs, fs_vad=16000, hop_length=30, vad_mode=0, threshold_db=-35.0, min_dur=0.5
):
    """
    Split a speech waveform into non-silent intervals using VAD.
    Parameters
    ----------
    data : ndarray
        numpy array of mono (1 ch) speech data.
        1-d or 2-d; if 2-d, shape must be (1, time_length) or (time_length, 1).
        If data type is int, -32768 < data < 32767.
        If data type is float, -1 < data < 1.
    fs : int
        Sampling frequency of data.
    fs_vad : int, optional
        Sampling frequency for webrtcvad.
        fs_vad must be 8000, 16000, 32000 or 48000.
        Default is 16000.
    hop_length : int, optional
        Step size [milliseconds].
        hop_length must be 10, 20, or 30.
        Default is 30.
    vad_mode : int, optional
        Set vad aggressiveness.
        As vad_mode increases, it becomes more aggressive.
        vad_mode must be 0, 1, 2 or 3.
        Default is 0.
    threshold_db : float, optional
        The threshold level (in dB) below reference to consider as silence.
        Default is -35.0.
    min_dur : float, optional
        The minimum duration (in seconds) of each speech segment.
        Default is 0.5.
    Returns
    -------
    edges : np.ndarray, shape=(m, 2)
        ``edges[i] == (start_i, end_i)`` are the start and end time
        (in samples) of non-silent interval ``i``.
    """
    activity = vad(data, fs, fs_vad, hop_length, vad_mode)
    segments = _get_edges(activity)
    # Bridge silences shorter than 100 ms, then drop too-short segments.
    segments = _merge_short_silence(segments, fs * 0.1)
    segments = segments[(segments[:, 1] - segments[:, 0]) > fs * min_dur]
    return _drop_silence(data, segments, threshold_db)
| {
"content_hash": "1a0c535bda10ff1206a7fa918f1484f7",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 86,
"avg_line_length": 27.89171974522293,
"alnum_prop": 0.5788992920758164,
"repo_name": "F-Tag/python-vad",
"id": "c68aecdec5ad028b3a62a56d2c726b5a5c43c74c",
"size": "4379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvad/effects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10532"
}
],
"symlink_target": ""
} |
"""Test the TcEx API Snippets."""
# first-party
from tcex.api.tc.v3.tql.tql_operator import TqlOperator
from tests.api.tc.v3.v3_helpers import TestV3, V3Helper
class TestCaseSnippets(TestV3):
    """Snippet tests for the Case object of the TcEx v3 API.

    Each ``test_*`` method exercises one documented code sample between
    the ``# Begin Snippet`` / ``# End Snippet`` markers, followed by
    cleanup of the objects it created.
    """

    # Shared v3 API interface; populated in setup_method.
    v3 = None

    def setup_method(self, method: callable):
        """Configure setup before each test: build helpers and remove
        leftover cases tagged with the upcoming test's name."""
        print('')  # ensure any following print statements will be on new line
        self.v3_helper = V3Helper('cases')
        self.v3 = self.v3_helper.v3
        self.tcex = self.v3_helper.tcex

        # remove any previous cases tagged with the next test case name
        cases = self.v3.cases()
        cases.filter.tag(TqlOperator.EQ, method.__name__)
        for case in cases:
            case.delete()

    def test_case_create(self):
        """Snippet: create a case with the core fields."""
        # Begin Snippet
        case = self.tcex.v3.case(
            name='MyCase-0001',
            description='An example case description.',
            resolution='Not Specified',
            severity='Low',
            status='Open',
            xid='MyCase-0001',
        )

        case.create()
        # End Snippet

        # Add cleanup
        case.delete()

    def test_case_stage_artifact(self):
        """Snippet: stage an artifact on a case before creating it."""
        # Begin Snippet
        case = self.tcex.v3.case(
            name='MyCase-0002',
            description='An example case description.',
            severity='Low',
            status='Open',
            xid='MyCase-0002',
        )

        # Add artifact
        artifact = self.tcex.v3.artifact(
            summary='asn999',
            type='ASN',
        )
        case.stage_artifact(artifact)

        case.create()
        # End Snippet

        # Add cleanup
        case.delete()

    def test_case_stage_attribute(self):
        """Snippet: stage an attribute on a case before creating it."""
        # Begin Snippet
        case = self.tcex.v3.case(
            name='MyCase-0003',
            description='An example case description.',
            severity='Low',
            status='Open',
            xid='MyCase-0003',
        )

        # Add attribute
        attribute = self.tcex.v3.case_attribute(
            value='An example description attribute.',
            type='Description',
        )
        case.stage_attribute(attribute)

        case.create()
        # End Snippet

        # Add cleanup
        case.delete()

    def test_case_stage_note(self):
        """Snippet: stage a note on a case before creating it."""
        # Begin Snippet
        case = self.tcex.v3.case(
            name='MyCase-0004',
            description='An example case description.',
            severity='Low',
            status='Open',
            xid='MyCase-0004',
        )

        # Add note
        note = self.tcex.v3.note(text='An example note.')
        case.stage_note(note)

        case.create()
        # End Snippet

        # Add cleanup
        case.delete()

    def test_case_stage_tag(self):
        """Snippet: stage a tag on a case before creating it."""
        # Begin Snippet
        case = self.tcex.v3.case(
            name='MyCase-0005',
            description='An example case description.',
            severity='Low',
            status='Open',
            xid='MyCase-0005',
        )

        # Add tag
        tag = self.tcex.v3.tag(name='Example-Tag')
        case.stage_tag(tag)

        case.create()
        # End Snippet

        # Add cleanup
        case.delete()

    def test_case_stage_task(self):
        """Snippet: stage a task on a case before creating it."""
        # Begin Snippet
        case = self.tcex.v3.case(
            name='MyCase-0006',
            description='An example case description.',
            severity='Low',
            status='Open',
            xid='MyCase-0006',
        )

        # Add task
        task = self.tcex.v3.task(description='An example task description.', name='MyTask')
        case.stage_task(task)

        case.create()
        # End Snippet

        # Add cleanup
        case.delete()

    def test_case_delete_by_id(self):
        """Snippet: delete a case looked up by its id."""
        case = self.v3_helper.create_case()

        # Begin Snippet
        case = self.tcex.v3.case(id=case.model.id)
        case.delete()
        # End Snippet

    def test_case_delete_by_name(self):
        """Snippet: delete all cases matching a name via a TQL filter."""
        case = self.v3_helper.create_case(name='MyCase-0007')

        # Begin Snippet
        cases = self.tcex.v3.cases()
        cases.filter.name(TqlOperator.EQ, 'MyCase-0007')
        for case in cases:
            # IMPORTANT: this will delete all cases with the name "MyCase-0007"
            case.delete()
        # End Snippet

    def test_case_delete_artifact(self):
        """Snippet: delete a specific artifact attached to a case."""
        case = self.v3_helper.create_case(
            name='MyCase-0007', artifacts={'type': 'ASN', 'summary': 'ASN1234'}
        )

        # Begin Snippet
        case = self.tcex.v3.case(id=case.model.id)
        case.get(params={'fields': ['artifacts']})
        for artifact in case.artifacts:
            if artifact.model.summary == 'ASN1234':
                # IMPORTANT: this will delete all case artifacts with summary "ASN1234"
                artifact.delete()
        # End Snippet

    def test_case_delete_attribute(self):
        """Snippet: delete a specific attribute attached to a case."""
        case = self.v3_helper.create_case(
            name='MyCase-0008',
            attributes={'type': 'Description', 'value': 'An Example Case Description'},
        )

        # Begin Snippet
        case = self.tcex.v3.case(id=case.model.id)
        case.get(params={'fields': ['attributes']})
        for attribute in case.attributes:
            if attribute.model.value == 'An Example Case Description':
                # IMPORTANT: this will delete all attributes attached to the case
                #     with value "An Example Case Description"
                attribute.delete()
        # End Snippet

    def test_case_delete_note(self):
        """Snippet: delete a specific note attached to a case."""
        case = self.v3_helper.create_case(
            name='MyCase-0009',
            notes={'text': 'An Example Note'},
        )

        # Begin Snippet
        case = self.tcex.v3.case(id=case.model.id)
        case.get(params={'fields': ['notes']})
        for note in case.notes:
            if note.model.text == 'An Example Note':
                # IMPORTANT: this will delete all notes attached to the case
                #     with value "An Example Note"
                note.delete()
        # End Snippet

    def test_case_remove_tag(self):
        """Snippet: remove one tag by re-staging all the others."""
        case = self.v3_helper.create_case(
            name='MyCase-10',
            tags=[
                {'name': 'Example-Tag'},
            ],
        )

        # Begin Snippet
        case = self.tcex.v3.case(id=case.model.id)
        for tag in case.tags:
            if tag.model.name != 'Example-Tag':
                # IMPORTANT: on case management the submitted tags will replace any existing
                #     tags on the case.
                case.stage_tag(tag.model)
        case.update()
        # End Snippet

    def test_case_delete_task(self):
        """Snippet: delete a specific task attached to a case."""
        case = self.v3_helper.create_case(
            name='MyCase-11',
            task={
                'description': 'An Example Task Description',
                'name': 'MyTask',
            },
        )

        # Begin Snippet
        case = self.tcex.v3.case(id=case.model.id)
        case.get(params={'fields': ['tasks']})
        for task in case.tasks:
            if task.model.name == 'MyTask':
                # IMPORTANT: this will delete all case tasks with value "MyTask"
                task.delete()
        # End Snippet

    def test_case_iterate(self):
        """Snippet: iterate over all cases and print their models."""
        case = self.v3_helper.create_case(name='MyCase-12')

        # Begin Snippet
        for case in self.tcex.v3.cases():
            print(case.model.dict(exclude_none=True))

    def test_case_get_by_name(self):
        """Snippet: retrieve cases matching a name via a TQL filter."""
        case = self.v3_helper.create_case(name='MyCase-13')

        # Begin Snippet
        cases = self.tcex.v3.cases()
        cases.filter.name(TqlOperator.EQ, 'MyCase-13')
        for case in cases:
            # IMPORTANT: this will return all cases with the name "MyCase-13"
            print(case.model.json(exclude_none=True))
        # End Snippet

    def test_case_update(self):
        """Snippet: update a case's name."""
        case = self.v3_helper.create_case(name='MyCase-14')

        # Begin Snippet
        case = self.tcex.v3.case(id=case.model.id)
        case.model.name = 'MyUpdatedCase'
        case.update()
        # End Snippet
| {
"content_hash": "2a7aa9d050f140ea968128a51485db36",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 92,
"avg_line_length": 29.25085910652921,
"alnum_prop": 0.5337171052631579,
"repo_name": "ThreatConnect-Inc/tcex",
"id": "3373baf948208aece5fdaa4d50762b199c8c41ac",
"size": "8512",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/api/tc/v3/cases/test_case_snippets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2735042"
}
],
"symlink_target": ""
} |
'''
Log handler.
'''
from concurrent.futures import ThreadPoolExecutor
from config import CMS_CFG
from torcms.core import tools
from torcms.core.base_handler import BaseHandler
from torcms.model.log_model import MLog
class LogHandler(BaseHandler):
    '''
    Handler for recording and viewing access logs.
    '''
    # Thread pool for executor-backed tornado methods.
    executor = ThreadPoolExecutor(2)

    def initialize(self, **kwargs):
        super().initialize()

    def get(self, *args, **kwargs):
        '''
        Dispatch GET requests; only single-segment '/pageview' and
        '/search' URLs are currently routed, anything else is a 404.
        '''
        url_str = args[0]
        url_arr = self.parse_url(url_str)
        if len(url_arr) == 1:
            if url_arr[0] in ['pageview', 'search']:
                # Page views.
                self.search()
        # elif len(url_arr) == 2:
        #     if url_arr[0] == 'pageview':
        #         self.pageview(url_arr[1])
        #     else:
        #         self.user_log_list(url_arr[0], url_arr[1])
        else:
            self.render('misc/html/404.html', userinfo=self.userinfo, kwd={})

    def post(self, *args, **kwargs):
        '''
        Dispatch POST requests: '_add' records a log entry, 'search'
        renders the page-view search results.
        '''
        url_str = args[0]
        url_arr = self.parse_url(url_str)
        if url_arr[0] in ['_add']:
            if len(url_arr) == 2:
                self.add(uid=url_arr[1])
            else:
                self.add()
        elif url_arr[0] == 'search':
            self.search()
        else:
            self.show404()

    def add(self, **kwargs):
        '''
        Record a log entry built from the request arguments, then
        redirect to the log index.
        '''
        post_data = {}
        for key in self.request.arguments:
            post_data[key] = self.get_arguments(key)[0]
        MLog.add(post_data)
        kwargs.pop('uid', None)  # delete `uid` if exists in kwargs
        self.redirect('/log/')

    def list(self, cur_p=''):
        '''
        View the list of the Log.
        '''
        # Parse the requested page number, falling back to 1 on bad input.
        current_page_number = 1
        if cur_p == '':
            current_page_number = 1
        else:
            try:
                current_page_number = int(cur_p)
            except TypeError:
                current_page_number = 1
            except Exception as err:
                print(err.args)
                print(str(err))
                print(repr(err))

        current_page_number = 1 if current_page_number < 1 else current_page_number
        # NOTE(review): pager_num is computed but never used — confirm before removing.
        pager_num = int(MLog.total_number() / CMS_CFG['list_num'])

        kwd = {
            'pager': '',
            'title': '',
            'current_page': current_page_number,
        }
        # is_p selects the admin AJAX template over the full page.
        if self.is_p:
            self.render('admin/log_ajax/user_list.html',
                        kwd=kwd,
                        user_list=MLog.query_all_user(),
                        no_user_list=MLog.query_all(
                            current_page_num=current_page_number),
                        format_date=tools.format_date,
                        userinfo=self.userinfo)
        else:
            self.render('misc/log/user_list.html',
                        kwd=kwd,
                        user_list=MLog.query_all_user(),
                        no_user_list=MLog.query_all(
                            current_page_num=current_page_number),
                        format_date=tools.format_date,
                        userinfo=self.userinfo)

    def user_log_list(self, userid, cur_p=''):
        '''
        View the list of the Log for a single user.
        '''
        # Parse the requested page number, falling back to 1 on bad input.
        current_page_number = 1
        if cur_p == '':
            current_page_number = 1
        else:
            try:
                current_page_number = int(cur_p)
            except TypeError:
                current_page_number = 1
            except Exception as err:
                print(err.args)
                print(str(err))
                print(repr(err))

        current_page_number = 1 if current_page_number < 1 else current_page_number
        # NOTE(review): pager_num is computed but never used — confirm before removing.
        pager_num = int(MLog.total_number() / CMS_CFG['list_num'])

        kwd = {
            'pager': '',
            'title': '',
            'current_page': current_page_number,
            'user_id': userid,
        }
        # is_p selects the admin AJAX template over the full page.
        if self.is_p:
            self.render('admin/log_ajax/user_log_list.html',
                        kwd=kwd,
                        infos=MLog.query_pager_by_user(
                            userid, current_page_num=current_page_number),
                        format_date=tools.format_date,
                        userinfo=self.userinfo)
        else:
            self.render('misc/log/user_log_list.html',
                        kwd=kwd,
                        infos=MLog.query_pager_by_user(
                            userid, current_page_num=current_page_number),
                        format_date=tools.format_date,
                        userinfo=self.userinfo)

    def pageview(self, cur_p=''):
        '''
        View per-URL page-view counts.
        '''
        # Parse the requested page number, falling back to 1 on bad input.
        current_page_number = 1
        if cur_p == '':
            current_page_number = 1
        else:
            try:
                current_page_number = int(cur_p)
            except TypeError:
                current_page_number = 1
            except Exception as err:
                print(err.args)
                print(str(err))
                print(repr(err))

        current_page_number = 1 if current_page_number < 1 else current_page_number
        # NOTE(review): pager_num is computed but never used — confirm before removing.
        pager_num = int(MLog.total_number() / CMS_CFG['list_num'])

        kwd = {
            'pager': '',
            'title': '',
            'current_page': current_page_number,
        }
        # Count hits per distinct URL; arr_num[i] pairs with postinfo[i].
        arr_num = []
        postinfo = MLog.query_all_current_url()
        for i in postinfo:
            postnum = MLog.count_of_current_url(i.current_url)
            arr_num.append(postnum)

        self.render('misc/log/pageview.html',
                    kwd=kwd,
                    # infos=MLog.query_all_pageview(
                    #     current_page_num=current_page_number),
                    postinfo=postinfo,
                    arr_num=arr_num,
                    format_date=tools.format_date,
                    userinfo=self.userinfo)

    def search(self, **kwargs):
        '''
        Render page-view records matching the `url` request argument.
        '''
        post_data = self.get_request_arguments()
        url = post_data.get('url')
        res = MLog.get_by_url(url)

        self.render('misc/log/pageview_search.html',
                    res=res,
                    format_date=tools.format_date,
                    userinfo=self.userinfo)
class LogPartialHandler(LogHandler):
    '''
    Partially render for user handler.
    '''

    def initialize(self, **kwargs):
        super().initialize()
        # is_p switches LogHandler's render targets to the admin `*_ajax`
        # partial templates.
        self.is_p = True
| {
"content_hash": "f3b15938332467a932a6103d6cc33d69",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 83,
"avg_line_length": 29.263636363636362,
"alnum_prop": 0.47452625038831936,
"repo_name": "bukun/TorCMS",
"id": "c811333e634dde219a759c9bc340c6ca19ee35eb",
"size": "6467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torcms/handlers/log_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "75939"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "HTML",
"bytes": "292427"
},
{
"name": "JavaScript",
"bytes": "34394"
},
{
"name": "Makefile",
"bytes": "1108"
},
{
"name": "Python",
"bytes": "747675"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "SCSS",
"bytes": "550"
},
{
"name": "Sass",
"bytes": "69221"
},
{
"name": "Shell",
"bytes": "1317"
}
],
"symlink_target": ""
} |
import json
from watson_developer_cloud import PersonalityInsightsV3
class Personality:
    """Thin wrapper around the Watson Personality Insights v3 client."""

    def __init__(self, url, username, password, version):
        # Basic-auth credentials; `version` is the Watson API version string.
        self.personality_insights = PersonalityInsightsV3(
            url=url,
            version=version,
            username=username,
            password=password,
        )

    def analize(self, text):
        """
        Returns the Watson PI Data for a specific text.

        Best-effort: on any API failure the error is printed and an empty
        string is returned instead of raising.
        """
        try:
            profile = self.personality_insights.profile(text,
                                                        raw_scores=True,
                                                        consumption_preferences=True)
        except Exception as e:
            # Deliberately swallow API errors so callers get a falsy result.
            print("Error during API call", e)
            profile = ""

        return profile
| {
"content_hash": "6895df07d8180d7a6a8d4bb90c53fb07",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 85,
"avg_line_length": 28.607142857142858,
"alnum_prop": 0.5131086142322098,
"repo_name": "martialblog/watson-diary",
"id": "9c317680acb217d713891749b7d576988a3a1865",
"size": "822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/personality.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "73"
},
{
"name": "Dockerfile",
"bytes": "604"
},
{
"name": "HTML",
"bytes": "551"
},
{
"name": "JavaScript",
"bytes": "17513"
},
{
"name": "Python",
"bytes": "21280"
},
{
"name": "Roff",
"bytes": "821"
},
{
"name": "Vue",
"bytes": "35247"
}
],
"symlink_target": ""
} |
"""
Interfaces for ABFS
"""
from future import standard_library
standard_library.install_aliases()
from builtins import object
import logging
import os
import sys
import threading
import re
from math import ceil
from posixpath import join
from hadoop.hdfs_site import get_umask_mode
from hadoop.fs.exceptions import WebHdfsException
from desktop.conf import RAZ
from desktop.lib.rest import http_client, resource
from desktop.lib.rest.raz_http_client import RazHttpClient
import azure.abfs.__init__ as Init_ABFS
from azure.abfs.abfsfile import ABFSFile
from azure.abfs.abfsstats import ABFSStat
from azure.conf import PERMISSION_ACTION_ABFS, is_raz_abfs
if sys.version_info[0] > 2:
import urllib.request, urllib.error
from urllib.parse import quote as urllib_quote
from urllib.parse import urlparse as lib_urlparse
else:
from urlparse import urlparse as lib_urlparse
from urllib import quote as urllib_quote
LOG = logging.getLogger(__name__)
# Azure has a 30MB block limit on upload.
UPLOAD_CHUCK_SIZE = 30 * 1000 * 1000
class ABFSFileSystemException(IOError):
  """Raised when an ABFS filesystem operation fails.

  The previous pass-through ``__init__`` only delegated to ``super()`` and
  added nothing, so it has been removed; construction is unchanged.
  """
  pass
class ABFS(object):
  def __init__(
      self,
      url,
      fs_defaultfs,
      logical_name=None,
      hdfs_superuser=None,
      security_enabled=False,
      ssl_cert_ca_verify=True,
      temp_dir="/tmp",
      umask=0o1022,
      hdfs_supergroup=None,
      access_token=None,
      token_type=None,
      expiration=None,
      username=None
  ):
    """Build an ADLS Gen2 (ABFS) filesystem client.

    `url` is the REST endpoint used for all requests and `fs_defaultfs` the
    abfs:// default filesystem URI; `access_token`/`token_type` carry the
    OAuth bearer credentials and `expiration` their expiry time
    (epoch milliseconds, per from_config).
    """
    self._url = url
    self._superuser = hdfs_superuser
    self._security_enabled = security_enabled
    self._ssl_cert_ca_verify = ssl_cert_ca_verify
    self._temp_dir = temp_dir
    self._umask = umask
    self.is_sentry_managed = lambda path: False  # ABFS paths are never Sentry-managed
    self._fs_defaultfs = fs_defaultfs
    self._logical_name = logical_name
    self._supergroup = hdfs_supergroup
    self._access_token = access_token
    self._token_type = token_type
    # Keep the scheme/netloc of the default filesystem for URI handling.
    split = lib_urlparse(fs_defaultfs)
    self._scheme = split.scheme
    self._netloc = split.netloc
    self._is_remote = True
    self._has_trash_support = False
    self._filebrowser_action = PERMISSION_ACTION_ABFS
    self.expiration = expiration
    self._user = username

    # To store user info
    self._thread_local = threading.local()  # Unused

    self._root = self.get_client(url)

    LOG.debug("Initializing ABFS : %s (security: %s, superuser: %s)" % (self._url, self._security_enabled, self._superuser))
@classmethod
def from_config(cls, hdfs_config, auth_provider):
credentials = auth_provider.get_credentials()
return cls(
url=hdfs_config.WEBHDFS_URL.get(),
fs_defaultfs=hdfs_config.FS_DEFAULTFS.get(),
logical_name=None,
security_enabled=False,
ssl_cert_ca_verify=False,
temp_dir=None,
umask=get_umask_mode(),
hdfs_supergroup=None,
access_token=credentials.get('access_token'),
token_type=credentials.get('token_type'),
expiration=int(credentials.get('expires_on')) * 1000 if credentials.get('expires_on') is not None else None,
username=credentials.get('username')
)
def get_client(self, url):
if RAZ.IS_ENABLED.get():
client = RazHttpClient(self._user, url, exc_class=WebHdfsException, logger=LOG)
else:
client = http_client.HttpClient(url, exc_class=WebHdfsException, logger=LOG)
return resource.Resource(client)
def _getheaders(self):
headers = {
"x-ms-version": "2019-12-12" # For latest SAS support
}
if self._token_type and self._access_token:
headers["Authorization"] = self._token_type + " " + self._access_token
return headers
  @property
  def superuser(self):
    # Superuser name supplied at construction (hdfs_superuser); may be None.
    return self._superuser
  @property
  def supergroup(self):
    # Supergroup name supplied at construction (hdfs_supergroup); may be None.
    return self._supergroup
# Parse info about filesystems, directories, and files
# --------------------------------
def isdir(self, path):
"""
Checks if the path is a directory (note diabled because filebrowser/views is bugged)
"""
resp = self.stats(path)
return resp.isDir
def isfile(self, path):
"""
Checks if the path is a file
"""
return not self.isdir(path)
def exists(self, path):
"""
Test if a path exists
"""
try:
if ABFS.isroot(path):
return True
self.stats(path)
except WebHdfsException as e:
if e.code == 404:
return False
raise WebHdfsException
except IOError:
return False
return True
def stats(self, path, params=None, **kwargs):
"""
List the stat of the actual file/directory
Returns the ABFFStat object
"""
if ABFS.isroot(path):
return ABFSStat.for_root(path)
try:
file_system, dir_name = Init_ABFS.parse_uri(path)[:2]
except:
raise IOError
if dir_name == '':
return ABFSStat.for_filesystem(self._statsf(file_system, params, **kwargs), path)
return ABFSStat.for_single(self._stats(file_system + '/' + dir_name, params, **kwargs), path)
  def listdir_stats(self, path, params=None, **kwargs):
    """
    List the stats for the entries inside the specified path.

    Returns a list of ABFSStat objects; the root path lists filesystems
    instead. NOTE: a caller-supplied `params` dict is mutated in place.
    TODO: revisit for recursive cases.
    """
    if ABFS.isroot(path):
      return self.listfilesystems_stats(params=None, **kwargs)
    dir_stats = []
    file_system, directory_name, account = Init_ABFS.parse_uri(path)
    # Keep the scheme spelling (abfs:// vs abfss://) the caller used.
    root = Init_ABFS.ABFS_ROOT
    if path.lower().startswith(Init_ABFS.ABFS_ROOT_S):
      root = Init_ABFS.ABFS_ROOT_S
    if params is None:
      params = {}
    if 'recursive' not in params:
      params['recursive'] = 'false'
    params['resource'] = 'filesystem'
    if directory_name != "":
      params['directory'] = directory_name
    res = self._root._invoke("GET", file_system, params, headers=self._getheaders(), **kwargs)
    resp = self._root._format_response(res)

    if account != '':
      file_system = file_system + account
    for x in resp['paths']:
      dir_stats.append(ABFSStat.for_directory(res.headers, x, root + file_system + "/" + x['name']))
    return dir_stats
  def listfilesystems_stats(self, root=Init_ABFS.ABFS_ROOT, params=None, **kwargs):
    """
    List the stats of the account's filesystems as ABFSStat objects.

    `params` is only used as a carrier for the 'resource' key; no other
    functionality for params.
    """
    stats = []
    if params is None:
      params = {}
    params["resource"] = "account"
    res = self._root._invoke("GET", params=params, headers=self._getheaders())
    resp = self._root._format_response(res)

    for x in resp['filesystems']:
      stats.append(ABFSStat.for_filesystems(res.headers, x, root))
    return stats
def _stats(self, schemeless_path, params=None, **kwargs):
"""
Container function for both stats,
Returns the header of the result
"""
if params is None:
params = {}
params['action'] = 'getStatus'
res = self._root._invoke('HEAD', schemeless_path, params, headers=self._getheaders(), **kwargs)
return res.headers
def _statsf(self, schemeless_path, params=None, **kwargs):
"""
Continer function for both stats but if it's a file system
Returns the header of the result
"""
if params is None:
params = {}
# For RAZ ABFS, the root path stats should have 'getAccessControl' param.
if is_raz_abfs():
params['action'] = 'getAccessControl'
else:
params['resource'] = 'filesystem'
res = self._root._invoke('HEAD', schemeless_path, params, headers=self._getheaders(), **kwargs)
return res.headers
def listdir(self, path, params=None, glob=None, **kwargs):
"""
Lists the names inside the current directories
"""
if ABFS.isroot(path):
return self.listfilesystems(params=params, **kwargs)
listofDir = self.listdir_stats(path, params)
return [x.name for x in listofDir]
def listfilesystems(self, root=Init_ABFS.ABFS_ROOT, params=None, **kwargs):
"""
Lists the names of the File Systems, limited arguements
"""
listofFileSystems = self.listfilesystems_stats(root=root, params=params)
return [x.name for x in listofFileSystems]
  @staticmethod
  def get_home_dir():
    """
    Return the home directory configured for ABFS, defaulting to abfs://.
    """
    return Init_ABFS.get_home_dir_for_abfs()
# Find or alter information about the URI path
# --------------------------------
  @staticmethod
  def isroot(path):
    """
    Checks if the path is the root path.
    """
    return Init_ABFS.is_root(path)
@staticmethod
def normpath(path):
"""
Normalizes a path
"""
resp = Init_ABFS.normpath(path)
return resp
  @staticmethod
  def netnormpath(path):
    """
    Normalizes a path (same as normpath for ABFS).
    """
    return Init_ABFS.normpath(path)
  @staticmethod
  def parent_path(path):
    """
    Returns the parent path of `path`.
    """
    return Init_ABFS.parent_path(path)
  @staticmethod
  def join(first, *comp_list):
    """
    Joins path components into a single ABFS path.
    """
    return Init_ABFS.join(first, *comp_list)
# Create Files,directories, or File Systems
# --------------------------------
def mkdir(self, path, params=None, headers=None, *args, **kwargs):
"""
Makes a directory
"""
if params is None:
params = {}
params['resource'] = 'directory'
self._create_path(path, params=params, headers=params, overwrite=False)
def create(self, path, overwrite=False, data=None, headers=None, *args, **kwargs):
"""
Makes a File (Put text in data if adding data)
"""
params = {'resource': 'file'}
self._create_path(path, params=params, headers=headers, overwrite=overwrite)
if data:
self._writedata(path, data, len(data))
def create_home_dir(self, home_path):
# When ABFS raz is enabled, try to create user home dir for REMOTE_STORAGE_HOME path
if is_raz_abfs():
LOG.debug('Attempting to create user directory for path: %s' % home_path)
try:
if not self.exists(home_path):
self.mkdir(home_path)
else:
LOG.debug('Skipping user directory creation, the path already exists: %s' % home_path)
except Exception as e:
LOG.exception('Failed to create user home directory for path %s with error: %s' % (home_path, str(e)))
else:
LOG.info('Create home directory is not available for Azure filesystem')
def _create_path(self, path, params=None, headers=None, overwrite=False):
"""
Container method for Create
"""
file_system, dir_name = Init_ABFS.parse_uri(path)[:2]
if dir_name == '':
return self._create_fs(file_system)
no_scheme = file_system + '/' + dir_name
additional_header = self._getheaders()
if headers is not None:
additional_header.update(headers)
if not overwrite:
additional_header['If-None-Match'] = '*'
self._root.put(no_scheme, params, headers=additional_header)
def _create_fs(self, file_system):
"""
Creates a File System
"""
self._root.put(file_system, {'resource': 'filesystem'}, headers=self._getheaders())
# Read Files
# --------------------------------
def read(self, path, offset='0', length=0, *args, **kwargs):
"""
Read data from a file
"""
path = Init_ABFS.strip_scheme(path)
headers = self._getheaders()
if length != 0 and length != '0':
headers['range'] = 'bytes=%s-%s' % (str(offset), str(int(offset) + int(length)))
return self._root.get(path, headers=headers)
  def open(self, path, option='r', *args, **kwargs):
    """
    Returns an ABFSFile object that presents `path` as an open file.
    """
    return ABFSFile(self, path, option)
# Alter Files
# --------------------------------
def append(self, path, data, offset=0):
if not data:
LOG.warning("There is no data to append to")
return
self._append(path, data)
return self.flush(path, {'position': int(len(data)) + int(offset)})
  def _append(self, path, data, size=0, offset=0, params=None, **kwargs):
    """
    Append `data` to `path` (staged server-side until flushed).

    When `params` is omitted the current file length is fetched to compute
    the append position, costing an extra round trip. `size` overrides the
    Content-Length derived from `data`.
    """
    path = Init_ABFS.strip_scheme(path)
    if params is None:
      LOG.warning("Params not specified, Append will take longer")
      resp = self._stats(path)
      params = {'position': int(resp['Content-Length']) + offset, 'action': 'append'}
    else:
      params['action'] = 'append'
    headers = {}
    if size == 0 or size == '0':
      headers['Content-Length'] = str(len(data))
      if headers['Content-Length'] == '0':
        # Nothing to append.
        return
    else:
      headers['Content-Length'] = str(size)
    return self._patching_sl(path, params, data, headers, **kwargs)
  def flush(self, path, params=None, headers=None, **kwargs):
    """
    Flush previously appended data into the file at `path`.

    `params['position']` must be the file length after the appended data;
    it defaults to 0 (with a warning) when not provided.
    """
    path = Init_ABFS.strip_scheme(path)
    if params is None:
      LOG.warning("Params not specified")
      params = {'position': 0}
    if 'position' not in params:
      LOG.warning("Position is not specified")
      params['position'] = 0
    params['action'] = 'flush'
    if headers is None:
      headers = {}
    # A flush request carries no body.
    headers['Content-Length'] = '0'
    self._patching_sl(path, params, header=headers, **kwargs)
# Remove Filesystems, directories. or Files
# --------------------------------
  def remove(self, path, skip_trash=True):
    """
    Removes the file or empty directory at `path`.

    Trash is unsupported on ABFS, so `skip_trash` must remain True.
    """
    self._delete(path, recursive='false', skip_trash=skip_trash)
  def rmtree(self, path, skip_trash=True):
    """
    Recursively removes everything in the given directory.
    """
    self._delete(path, recursive='true', skip_trash=skip_trash)
  def _delete(self, path, recursive='false', skip_trash=True):
    """
    Shared deletion implementation; there is no trash support and the
    root path cannot be removed.

    Raises NotImplementedError when skip_trash is False and RuntimeError
    for the root path.
    """
    if not skip_trash:
      raise NotImplementedError("Trash not implemented for ABFS")
    if ABFS.isroot(path):
      raise RuntimeError("Cannot Remove Root")
    file_system, dir_name = Init_ABFS.parse_uri(path)[:2]
    if dir_name == '':
      # Deleting the whole filesystem (container).
      return self._root.delete(file_system, {'resource': 'filesystem'}, headers=self._getheaders())
    new_path = file_system + '/' + dir_name
    param = None
    if self.isdir(path):
      param = {'recursive': recursive}
    self._root.delete(new_path, param, headers=self._getheaders())
  def restore(self, path):
    # Trash is not supported on ABFS, so restore cannot be implemented.
    raise NotImplementedError("")
# Edit permissions of Filesystems, directories. or Files
# --------------------------------
def chown(self, path, user=None, group=None, *args, **kwargs):
"""
Changes ownership (not implemented)
"""
headers = {}
if user is not None:
headers['x-ms-owner'] = user
if group is not None:
headers['x-ms-group'] = group
self.setAccessControl(path, headers=headers, **kwargs)
def chmod(self, path, permissionNumber=None, *args, **kwargs):
"""
Set File Permissions (passing as an int converts said integer to octal. Passing as a string assumes the string is in octal)
"""
header = {}
if permissionNumber is not None:
if isinstance(permissionNumber, basestring):
header['x-ms-permissions'] = str(permissionNumber)
else:
header['x-ms-permissions'] = oct(permissionNumber)
self.setAccessControl(path, headers=header)
def setAccessControl(self, path, headers, **kwargs):
"""
Set Access Controls (Can do both chmod and chown) (not implemented)
"""
path = Init_ABFS.strip_scheme(path)
params = {'action': 'setAccessControl'}
if headers is None:
headers = {}
self._patching_sl(path, params, header=headers, **kwargs)
  def mktemp(self, subdir='', prefix='tmp', basedir=None):
    # Temporary-file creation is not supported for ABFS.
    raise NotImplementedError("")
  def purge_trash(self):
    # Trash is not supported on ABFS.
    raise NotImplementedError("")
# Handle file systems interactions
# --------------------------------
def copy(self, src, dst, *args, **kwargs):
"""
General Copying
"""
if self.isfile(src):
return self.copyfile(src, dst)
self.copy_remote_dir(src, dst)
def copyfile(self, src, dst, *args, **kwargs):
"""
Copies a File to another location
"""
new_path = dst + '/' + Init_ABFS.strip_path(src)
self.create(new_path)
chunk_size = self.get_upload_chuck_size()
file = self.read(src)
size = len(file)
self._writedata(new_path, file, size)
def copy_remote_dir(self, src, dst, *args, **kwargs):
"""
Copies the entire contents of a directory to another location
"""
dst = dst + '/' + Init_ABFS.strip_path(src)
self.mkdir(dst)
other_files = self.listdir(src)
for x in other_files:
x = src + '/' + Init_ABFS.strip_path(x)
self.copy(x, dst)
def rename(self, old, new):
"""
Renames a file
"""
headers = {'x-ms-rename-source': '/' + urllib_quote(Init_ABFS.strip_scheme(old))}
try:
self._create_path(new, headers=headers, overwrite=True)
except WebHdfsException as e:
if e.code == 409:
self.copy(old, new)
self.rmtree(old)
else:
raise e
def rename_star(self, old_dir, new_dir):
    """
    Renames a directory

    Delegates to rename(), which moves the whole path.
    """
    self.rename(old_dir, new_dir)
def upload(self, file, path, *args, **kwargs):
    """
    Upload is done by the client

    Intentionally a no-op on this filesystem object.
    """
    pass
def copyFromLocal(self, local_src, remote_dst, *args, **kwargs):
    """
    Copy a directory or file from Local (Testing)

    Trailing slashes are stripped from both paths. Directories are copied
    recursively; a single file lands inside remote_dst when that is a
    directory, otherwise at remote_dst itself.
    """
    local_src = local_src.endswith('/') and local_src[:-1] or local_src
    remote_dst = remote_dst.endswith('/') and remote_dst[:-1] or remote_dst
    if os.path.isdir(local_src):
        self._local_copy_dir(local_src, remote_dst)
    else:
        (basename, filename) = os.path.split(local_src)
        self._local_copy_file(local_src, self.isdir(remote_dst) and self.join(remote_dst, filename) or remote_dst)
def _local_copy_dir(self, local_src, remote_dst):
    """
    Recursively copy a local directory tree up to the remote filesystem.

    Bug fix: the previous body referenced undefined names (remote_dir,
    local_dir, mode) and nonexistent helpers (_copy_dir/_copy_file), so
    any call raised NameError. It now uses its actual parameters and the
    real _local_copy_dir/_local_copy_file helpers.
    """
    self.mkdir(remote_dst)
    for entry in os.listdir(local_src):
        child_src = os.path.join(local_src, entry)
        child_dst = self.join(remote_dst, entry)
        if os.path.isdir(child_src):
            self._local_copy_dir(child_src, child_dst)
        else:
            self._local_copy_file(child_src, child_dst)
def _local_copy_file(self, local_src, remote_dst, chunk_size=UPLOAD_CHUCK_SIZE):
    """
    Copy a single local file to the remote filesystem in chunks.

    Skips the copy when the source is not a regular file or when the
    destination already exists. Uses open() instead of the Python-2-only
    file() builtin, and narrows the bare except to Exception so that
    KeyboardInterrupt/SystemExit are not swallowed into the log.
    """
    if not os.path.isfile(local_src):
        LOG.info(_('Skipping %s (not a file).') % local_src)
        return
    if self.exists(remote_dst):
        LOG.info('%s already exists. Skipping.' % remote_dst)
        return
    src = open(local_src)
    try:
        try:
            self.create(remote_dst)
            offset = 0
            chunk = src.read(chunk_size)
            while chunk:
                size = len(chunk)
                # Each append carries its absolute write position.
                self._append(remote_dst, chunk, size=size, params={'position': offset})
                offset += size
                chunk = src.read(chunk_size)
            self.flush(remote_dst, params={'position': offset})
        except Exception:
            LOG.exception(_('Copying %s -> %s failed.') % (local_src, remote_dst))
            raise
    finally:
        src.close()
def check_access(self, path, *args, **kwargs):
    """
    Check access of a file/directory. Not implemented for ABFS.

    Removed the unreachable code that followed the raise: it referenced
    an undefined name `b` in except clauses and could never execute.
    """
    raise NotImplementedError("")
def mkswap(self, filename, subdir='', suffix='swp', basedir=None):
    """
    Makes a directory and returns a potential filename for that directory

    Ensures <basedir or self._temp_dir>/<subdir> exists, then returns the
    candidate path "<filename>.<suffix>" inside it (the file itself is
    not created).
    """
    base = self.join(basedir or self._temp_dir, subdir)
    if not self.isdir(base):
        self.mkdir(base)
    candidate = self.join(base, "%s.%s" % (filename, suffix))
    return candidate
def setuser(self, user):
    """Switch the user this filesystem client acts on behalf of."""
    self._user = user
def get_upload_chuck_size(self):
    """
    Gets the maximum size allowed to upload

    Returns the module-level UPLOAD_CHUCK_SIZE constant used by the
    chunked write helpers.
    """
    return UPLOAD_CHUCK_SIZE
def filebrowser_action(self):
    # Accessor for the filebrowser action configured on this client.
    return self._filebrowser_action
# Other Methods to condense stuff
#----------------------------
# Write Files on creation
#----------------------------
def _writedata(self, path, data, size):
    """
    Append `size` bytes of `data` to `path` in upload-sized chunks, then
    flush at the final position.

    Bug fixes: ceil() returns a float, so range(0, cycles) raised
    TypeError on Python 3; and the old `i != cycles` test was always true
    inside the loop, so the last partial chunk was reported with a full
    chunk_size length. Both are replaced by computing each chunk's real
    length from its offset.
    """
    chunk_size = self.get_upload_chuck_size()
    cycles = int(ceil(float(size) / chunk_size))
    for i in range(cycles):
        start = i * chunk_size
        length = min(chunk_size, size - start)
        self._append(path, data[start:start + length], length)
    self.flush(path, {'position': int(size)})
# Use Patch HTTP request
#----------------------------
def _patching_sl(self, schemeless_path, param, data=None, header=None, **kwargs):
    """
    A wraper function for patch

    Merges the client auth headers into `header` (NOTE: this mutates the
    caller-supplied dict in place) and issues a PATCH via the root
    resource.
    """
    if header is None:
        header = {}
    header.update(self._getheaders())
    return self._root.invoke('PATCH', schemeless_path, param, data, headers=header, **kwargs)
| {
"content_hash": "f049459262359cf38b16ad0d65f8127d",
"timestamp": "",
"source": "github",
"line_count": 721,
"max_line_length": 127,
"avg_line_length": 29.328710124826628,
"alnum_prop": 0.6243261136858035,
"repo_name": "cloudera/hue",
"id": "c8d2ad9604de9292c2f6ea4452825d4d5e0c3d08",
"size": "21938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/libs/azure/src/azure/abfs/abfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import json
from bson.objectid import ObjectId
import gridfs
from pymongo import MongoClient
from turbo.model import BaseModel
from turbo.util import PY3, basestring_type as basestring, utf8
from util import unittest, fake_ids, fake_ids_2
if PY3:
from io import StringIO
else:
from cStringIO import StringIO
mc = MongoClient()
class Tag(BaseModel):
    """Minimal BaseModel subclass used as the fixture collection for tests."""

    # Collection name.
    name = 'tag'
    # Schema: field name -> (expected type, default value).
    field = {
        'list': (list, []),
        'imgid': (ObjectId, None),
        'uid': (ObjectId, None),
        'name': (basestring, None),
        'value': (int, 0),
        'atime': (datetime.datetime, None),
        'up': (dict, {}),
    }

    def __init__(self):
        # Wire the model to the local 'test' database plus a GridFS bucket.
        db = {
            'db': {'test': mc['test']},
            'db_file': {'test': gridfs.GridFS(mc['test_files'])}
        }
        super(Tag, self).__init__('test', db)

    def write_action_call(self, name, *args, **kwargs):
        # No-op: silences write-action callbacks during the tests.
        pass
class BaseModelTest(unittest.TestCase):
    """Integration tests for turbo's BaseModel against a live local MongoDB.

    Each test starts from a collection seeded with one document per id in
    fake_ids (with 'value' set to the id's index) and cleans up both
    fake_ids and fake_ids_2 afterwards.
    """

    def setUp(self):
        self.tb_tag = Tag()
        self._make_data()

    def tearDown(self):
        self._clear_data()
        del self.tb_tag

    def _make_data(self):
        # Seed one document per fake id; 'value' mirrors the index.
        for index, i in enumerate(fake_ids):
            self.tb_tag.insert_one({'_id': i, 'value': index})

    def _clear_data(self):
        for index, i in enumerate(fake_ids):
            self.tb_tag.remove_by_id(i)
        for index, i in enumerate(fake_ids_2):
            self.tb_tag.remove_by_id(i)

    def test_insert(self):
        _id = self.tb_tag.insert({'value': 0})
        self.assertIsNot(_id, None)
        # Nested structures (ObjectIds inside lists/dicts) must round-trip.
        record = {
            'list': [
                {'key': ObjectId(), 'key2': 'test', 'key3': ObjectId()},
                10,
                12,
                13,
                ['name', 'name', 'name', ObjectId(), ObjectId()],
                datetime.datetime.now(),
            ],
            'imgid': ObjectId(),
            'up': {
                'key1': ObjectId(),
                'key2': ObjectId(),
                'key3': ObjectId(),
            }
        }
        result = self.tb_tag.insert(record)
        self.assertIsInstance(result, ObjectId)
        # insert one
        _id = self.tb_tag.insert({'_id': fake_ids_2[0]})
        self.assertEqual(_id, fake_ids_2[0])
        result = self.tb_tag.find_by_id(fake_ids_2[0])
        self.assertEqual(result['value'], 0)
        # insert not field
        with self.assertRaises(Exception):
            result = self.tb_tag.insert({'nokey': 10})
        # check
        _id = self.tb_tag.insert({'imgid': None})
        self.assertIsNot(_id, None)
        result = self.tb_tag.find_by_id(_id)
        self.assertEqual(result['value'], 0)
        self.tb_tag.remove_by_id(_id)
        docs = [{'_id': i} for i in fake_ids_2[1:]]
        result = self.tb_tag.insert(docs)
        self.assertEqual(1, len(result))
        for i in result:
            self.assertIn(i, fake_ids_2)
        result = self.tb_tag.find_by_id(fake_ids_2[1:])
        self.assertEqual(1, len(result))
        for i in result:
            self.assertEqual(i['value'], 0)

    def test_save(self):
        _id = self.tb_tag.save({'value': 0})
        self.assertIsNot(_id, None)
        # Saving with an existing _id overwrites the document.
        _id = self.tb_tag.save({'_id': _id, 'value': 10})
        result = self.tb_tag.find_by_id(_id)
        self.assertEqual(result['value'], 10)
        self.tb_tag.remove_by_id(_id)

    def test_update(self):
        # Update documents must use $-operators.
        with self.assertRaises(ValueError):
            self.tb_tag.update({}, {'hello': 0})
        self.tb_tag.update({}, {'$set': {'hello': 0}})
        # update all empty
        with self.assertRaises(ValueError):
            self.tb_tag.update({}, {})
        # update one
        self.tb_tag.update({'_id': fake_ids[0]}, {'$set': {'value': 11}})
        result = self.tb_tag.find_by_id(fake_ids[0])
        self.assertEqual(result['value'], 11)
        # update one
        result = self.tb_tag.update({'_id': {'$in': fake_ids[1:11]}}, {
            '$set': {'value': 11}})
        self.assertEqual(result.matched_count, 1)
        if result.modified_count is not None:
            self.assertEqual(result.modified_count, 1)
        # update many
        result = self.tb_tag.update({'_id': {'$in': fake_ids[2:12]}}, {
            '$set': {'value': -1}}, multi=True)
        print(result)
        self.assertEqual(result.matched_count, 10)
        # modified_count reflects the number of documents actually
        # changed; a $set to the current value does not count as modified.
        if result.modified_count is not None:
            self.assertEqual(result.modified_count, 10)

    def test_remove(self):
        # not allow remove all
        with self.assertRaises(Exception):
            self.tb_tag.remove({})
        result = self.tb_tag.find_by_id(fake_ids[0])
        self.assertEqual(result['_id'], fake_ids[0])
        self.tb_tag.remove({'_id': fake_ids[0]})
        result = self.tb_tag.find_by_id(fake_ids[0])
        self.assertEqual(result, None)
        # remove one
        self.tb_tag.remove({'_id': {'$in': fake_ids[2:12]}})
        result = self.tb_tag.find_by_id(fake_ids[2:12])
        self.assertEqual(len(list(result)), 9)
        # remove many
        self.tb_tag.remove({'_id': {'$in': fake_ids[3:13]}}, multi=True)
        result = self.tb_tag.find_by_id(fake_ids[3:13])
        self.assertEqual(len(list(result)), 0)

    def test_insert_one(self):
        result = self.tb_tag.insert_one({'value': 0})
        self.assertIsNot(result.inserted_id, None)

    def test_find_one(self):
        _id = self.tb_tag.insert({'value': 100})
        self.assertEqual(self.tb_tag.find_one({'_id': _id})['value'], 100)
        # Without wrapper=True missing keys raise; with it they return None.
        with self.assertRaises(KeyError):
            self.tb_tag.find_one({'_id': _id})['nokey']
        self.assertIsNone(
            self.tb_tag.find_one({'_id': _id}, wrapper=True)['nokey'])
        self.assertIsNone(self.tb_tag.find_one(
            {'_id': ObjectId()}, wrapper=True))

    def test_find(self):
        self.assertGreater(len(list(self.tb_tag.find())), 0)
        for i in list(self.tb_tag.find()):
            with self.assertRaises(KeyError):
                i['nokey']
        for i in list(self.tb_tag.find(wrapper=True)):
            self.assertIsNone(i['nokey'])

    def test_update_one(self):
        with self.assertRaises(ValueError):
            self.tb_tag.update_one({}, {'hello': 0})
        self.tb_tag.update_one({}, {'$set': {'hellow': 0}})

    def test_update_many(self):
        with self.assertRaises(ValueError):
            self.tb_tag.update_many({}, {'hello': 0})
        self.tb_tag.update_many({}, {'$set': {'value': 1}})
        for i in list(self.tb_tag.find()):
            self.assertEqual(i['value'], 1)

    def test_delete_many(self):
        # Deleting with an empty filter must be rejected.
        with self.assertRaises(Exception):
            self.tb_tag.delete_many({})
        for i in fake_ids:
            self.tb_tag.delete_many({'_id': i})
            self.assertIsNone(self.tb_tag.find_by_id(i))

    def test_find_by_id(self):
        self.assertEqual(self.tb_tag.find_by_id(
            fake_ids[0])['_id'], fake_ids[0])
        for index, i in enumerate(self.tb_tag.find_by_id(fake_ids[0:10])):
            self.assertEqual(i['_id'], fake_ids[index])

    def test_remove_by_id(self):
        # Accepts a single id or a list of ids.
        for i in fake_ids[0:10]:
            self.tb_tag.remove_by_id(i)
            self.assertIsNone(self.tb_tag.find_by_id(i))
        result = self.tb_tag.remove_by_id(fake_ids[10:20])
        for i in fake_ids[10:20]:
            self.assertIsNone(self.tb_tag.find_by_id(i))
        self.assertEqual(result.deleted_count, 10)

    def test_get_as_dict(self):
        as_dict, as_list = self.tb_tag.get_as_dict(
            {'_id': {'$in': fake_ids[0:10]}})
        for index, i in enumerate(as_dict.keys()):
            self.assertIn(i, fake_ids[0:10])
        for i in as_list:
            self.assertIn(i['_id'], fake_ids[0:10])

    def test_to_objectid(self):
        self.assertTrue(self.tb_tag.to_objectid(None) is None)
        self.assertEqual(self.tb_tag.to_objectid('52c8fb6f1d41c820f1124350'),
                         ObjectId('52c8fb6f1d41c820f1124350'), 'to_objectid is fail')

    def test_create_model(self):
        self.assertEqual(self.tb_tag.create_model(
            'tag').find_one() is not None, True)

    def test_pymongo_collection_method(self):
        # Unknown attributes proxy through to the pymongo collection.
        self.assertEqual(self.tb_tag.full_name, 'test.tag')

    def test_sub_collection(self):
        self.assertEqual(self.tb_tag.sub_collection(
            'test').full_name, 'test.tag.test')

    def test_inc(self):
        for index, i in enumerate(self.tb_tag.find_by_id(fake_ids[0:3])):
            self.assertEqual(i['value'], index)
        self.tb_tag.inc(
            {'_id': {'$in': fake_ids[0:3]}}, 'value', 1, multi=True)
        for index, i in enumerate(self.tb_tag.find_by_id(fake_ids[0:3])):
            self.assertEqual(i['value'], index + 1)
        self.tb_tag.inc({'_id': fake_ids[4]}, 'value', 1)
        self.assertEqual(self.tb_tag.find_by_id(fake_ids[4])['value'], 5)

    def test_to_str(self):
        # to_str must yield something json.dumps can serialize.
        one = self.tb_tag.to_str(self.tb_tag.find(limit=10))
        self.assertTrue(isinstance(json.dumps(one), basestring))

    def test_to_one_str(self):
        one = self.tb_tag.to_one_str(self.tb_tag.find_one())
        self.assertTrue(isinstance(json.dumps(one), basestring))

    def test_write_action_call(self):
        # NOTE(review): func/func2 are defined but never registered here --
        # this test only exercises that save/find run without error.
        def func(se, name, *args, **kwargs):
            self.assertEqual(name, 'save')

        def func2(se, name, *args, **kwargs):
            self.assertEqual(name, 'find')
        self.tb_tag.save({'value': 0})
        self.tb_tag.find()

    def test_default_encode(self):
        # ObjectId -> str, datetime -> float timestamp, others unchanged.
        self.assertTrue(isinstance(
            self.tb_tag.default_encode(ObjectId()), basestring))
        self.assertTrue(isinstance(
            self.tb_tag.default_encode(datetime.datetime.now()), float))
        self.assertEqual(self.tb_tag.default_encode('string'), 'string')

    def test_put(self):
        value = 'hello word'
        s = StringIO()
        s.write(value)
        # put
        file_id = self.tb_tag.put(utf8(s.getvalue()))
        self.assertTrue(isinstance(file_id, ObjectId))
        # get
        one = self.tb_tag.get(file_id)
        self.assertTrue(getattr(one, 'read', False), 'test get fail')
        self.assertEqual(one.read(), utf8(value))

    def test_create(self):
        # create() mirrors the insert() contract (same assertions).
        _id = self.tb_tag.create({'value': 0})
        self.assertIsNot(_id, None)
        record = {
            'list': [
                {'key': ObjectId(), 'key2': 'test', 'key3': ObjectId()},
                10,
                12,
                13,
                ['name', 'name', 'name', ObjectId(), ObjectId()],
                datetime.datetime.now(),
            ],
            'imgid': ObjectId(),
            'up': {
                'key1': ObjectId(),
                'key2': ObjectId(),
                'key3': ObjectId(),
            }
        }
        result = self.tb_tag.create(record)
        self.assertIsInstance(result, ObjectId)
        # create one
        _id = self.tb_tag.create({'_id': fake_ids_2[0]})
        self.assertEqual(_id, fake_ids_2[0])
        result = self.tb_tag.find_by_id(fake_ids_2[0])
        self.assertEqual(result['value'], 0)
        # create not field
        with self.assertRaises(Exception):
            result = self.tb_tag.create({'nokey': 10})
        # check
        _id = self.tb_tag.create({'imgid': None})
        self.assertIsNot(_id, None)
        result = self.tb_tag.find_by_id(_id)
        self.assertEqual(result['value'], 0)
        self.tb_tag.remove_by_id(_id)
        docs = [{'_id': i} for i in fake_ids_2[1:]]
        result = self.tb_tag.create(docs)
        self.assertEqual(1, len(result))
        for i in result:
            self.assertIn(i, fake_ids_2)
        result = self.tb_tag.find_by_id(fake_ids_2[1:])
        self.assertEqual(1, len(result))
        for i in result:
            self.assertEqual(i['value'], 0)

    def log(self, one):
        print(one)
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "ef627c2fe9d20637086ae8924b73e625",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 85,
"avg_line_length": 32.96467391304348,
"alnum_prop": 0.5410930673481164,
"repo_name": "wecatch/app-turbo",
"id": "b56427b3d7ffb67dacdb03ea89f9ce32fb777ceb",
"size": "12230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2127"
},
{
"name": "Python",
"bytes": "113614"
}
],
"symlink_target": ""
} |
# Legacy sysfs GPIO control paths.
_exportpath = '/sys/class/gpio/gpiochip0/subsystem/export'
_unexportpath = '/sys/class/gpio/gpiochip0/subsystem/unexport'
_gpiopath = '/sys/class/gpio/gpio'  # per-pin directory prefix: gpio<N>/...
def writetofs(fname, data):
    """Write `data` to a sysfs GPIO control file and close it immediately.

    The GPIO subsystem only acts on the value once the file is closed, so
    a context manager (which also closes on error) is used instead of the
    manual open/write/close sequence.
    """
    with open(fname, 'w') as f:
        f.write(data)
def readfromfs(fname):
    """Read a sysfs GPIO value file; return 1 if it starts with '1', else 0.

    Uses a context manager so the file is closed even on error, and
    startswith() instead of s[0], which raised IndexError on an empty
    file.
    """
    with open(fname, 'r') as f:
        s = f.read()
    return 1 if s.startswith('1') else 0
def readinput(pin):
    """Return the current value (0 or 1) of an exported GPIO input pin."""
    value_file = '{0}{1}/value'.format(_gpiopath, pin)
    return readfromfs(value_file)
# "Export" the pin and set the direction
def initpin(pin,direction):
writetofs(_exportpath,str(pin))
writetofs(_gpiopath+str(pin)+'/direction',direction)
def closepin(pin):
    """Unexport `pin`, releasing it back to the kernel."""
    writetofs(_unexportpath, str(pin))
def setoutput(pin, value):
    """Drive an output pin: 0 writes low, any non-zero value writes high."""
    level = '0' if value == 0 else '1'
    writetofs(_gpiopath + str(pin) + '/value', level)
| {
"content_hash": "0d47997e79527ad0434dd5c265cfc0b0",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 23.047619047619047,
"alnum_prop": 0.6838842975206612,
"repo_name": "msoftware/onion_omega2_hex_keypad",
"id": "8b3fa8d157c0c38afdc79eaf98fd4cade154805b",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "omega_gpio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2473"
}
],
"symlink_target": ""
} |
import json
import telegram
from rq.decorators import job
from redis_wrap import SYSTEMS
import config
import botcommands
from utils import *
bot = None
@job('reply', connection=SYSTEMS['default'], result_ttl=5)
def handle_update(update, telegram_bot=None):
    """Process one incoming Telegram update on the RQ 'reply' queue.

    Keeps the 'conversations' mapping (chat id -> user name / group title)
    in sync, then routes text messages to the command or plain-text
    handler. Non-text payloads (photo/video/document/audio/location) are
    currently ignored.
    """
    global bot
    if telegram_bot is None:
        print 'no bot'
        bot = telegram.Bot(token=config.TOKEN)
    else:
        bot = telegram_bot
    message = update.message
    # Save conversation info
    conversations = config.get('conversations')
    str_chat_id = smart_text(message.chat_id)
    if message.left_chat_participant is not None:
        # Forget the conversation when this bot itself was removed
        # (participant names carry a leading '@', hence the [1:] slice).
        if smart_text(
            message.left_chat_participant.name[1:]
        ) == config.__name__:
            del conversations[str_chat_id]
        return
    # Store chat info if it does not exist
    if str_chat_id not in conversations:
        if isinstance(message.chat, (telegram.User, )):
            conversations[str_chat_id] = message.chat.name
        elif isinstance(message.chat, (telegram.GroupChat, )):
            conversations[str_chat_id] = message.chat.title
    else:
        # Update chat info if it changed
        if isinstance(message.chat, (telegram.User, ))\
                and smart_text(
                    message.chat.name
                ) != smart_text(conversations[str_chat_id]):
            conversations[str_chat_id] = message.chat.name
        elif isinstance(message.chat, (telegram.GroupChat, ))\
                and smart_text(
                    message.chat.title
                ) != smart_text(conversations[str_chat_id]):
            conversations[str_chat_id] = message.chat.title
    if message.text:
        text = message.text.strip()
        if text.startswith('/'):
            handle_command(text, message)
        else:
            handle_text(text, message)
    elif message.photo:
        pass
    elif message.video:
        pass
    elif message.document:
        pass
    elif message.audio:
        pass
    elif message.location:
        pass
def handle_command(text, message, debug=False):
    """Dispatch a '/command' message to the matching botcommands handler.

    Admins can force debug mode by including '/debug'; commands addressed
    to a different bot ('cmd@otherbot') are ignored.
    """
    # Admins can toggle debug mode for commands
    if '/debug' in text \
            and message.from_user.name in config.get('admins'):
        debug = True
    if '@' in text and config.__name__ not in text.split('@'):
        return
    command, options, words = extract_texts(message.text)
    # Reject command names that are not purely alphanumeric.
    if not smart_text(command).isalnum():
        return send_reply(text='机器人酱并不懂你发的那是什么玩意', message=message)
    if command in ('ls', 'help', ):
        return send_reply(text=list_commands(message, debug=debug),
                          message=message)
    # Any callable on the botcommands module with this name handles it.
    if hasattr(botcommands, command):
        result = getattr(botcommands, command)(message, debug=debug)
        if result is not None:
            return send_reply(text=result, message=message)
    if debug:
        text = u'%s 命令现在并没有什么卯月' % command
        send_reply(text=text, message=message)
@job('reply', connection=SYSTEMS['default'], result_ttl=5)
def handle_pi_command(msg_payload, telegram_bot=None):
    """Relay a JSON command payload from the Pi as a Telegram reply.

    Tries a rich reply first (photo/emoji/audio/video/location), falling
    back to a plain-text reply on failure. Bug fix: the old fallback
    referenced `msg`/`reply_to` even when parsing the payload itself had
    failed, raising NameError instead of logging the real problem.
    """
    global bot
    if telegram_bot is None:
        bot = telegram.Bot(token=config.TOKEN)
    else:
        bot = telegram_bot
    msg = None
    reply_to = None
    try:
        msg = json.loads(msg_payload)
        reply_to = telegram.Message.de_json(msg['reply_to'])
        return send_reply(text=msg.get('text', None),
                          photo=msg.get('photo', None),
                          emoji=msg.get('emoji', None),
                          audio=msg.get('audio', None),
                          video=msg.get('video', None),
                          location=msg.get('location', None),
                          message=reply_to)
    except Exception:
        if msg is None or reply_to is None:
            # The payload never parsed; there is nothing to reply to.
            print(extract_traceback())
            return
        try:
            return send_plain_text(
                text=msg.get('text', None),
                photo=msg.get('photo', None),
                message=reply_to
            )
        except Exception:
            print(extract_traceback())
def list_commands(msg, debug=False):
    '''List all commands available'''
    commands = []
    for command in dir(botcommands):
        attr = getattr(botcommands, command)
        if callable(attr):
            # func_doc is the Python 2 alias for __doc__.
            commands.append('%s - %s\n' % (command, attr.func_doc, ))
    commands.append('help - 列出所有可用的命令')
    return ''.join(commands)
def handle_text(text, message):
    """Plain-text handler; the echo reply is currently disabled."""
    text = u'{0}: {1}'.format(u'复读机', text)
    # send_reply(text=text, message=message)
def send_plain_text(text=None, photo=None, message=None, reply=True):
    """Send a bare text message (or a photo URL as text) to the chat.

    Bug fix: when neither a 'http' photo URL nor non-empty text was
    given, `content` was unbound and the call crashed with NameError; it
    now raises a descriptive ValueError instead.
    """
    if photo and 'http' in photo:
        content = photo
    elif text:
        content = text
    else:
        raise ValueError('Nothing to send: need a photo URL or non-empty text')
    bot.sendChatAction(chat_id=message.chat_id,
                       action='typing')
    bot.sendMessage(message.chat_id,
                    smart_text(content),
                    reply_to_message_id=message.message_id)
def send_reply(text=None, photo=None, emoji=None,
               audio=None, video=None, fileobj=None,
               location=None, message=None, reply=True):
    """Reply to `message`'s chat, choosing a chat action from the payload.

    Only photos short-circuit into sendPhoto; every other payload kind
    merely selects the "chat action" indicator and the content goes out
    as text. NOTE(review): `reply`, `emoji` and the non-photo media
    arguments are never actually sent -- confirm intent.
    """
    if not message:
        raise RuntimeError('Dont know the chat id')
    # Currently text reply is the only supported type
    action = 'typing'
    if photo:
        action = 'upload_photo'
        bot.sendChatAction(chat_id=message.chat_id,
                           action=action)
        bot.sendPhoto(message.chat_id,
                      photo,
                      reply_to_message_id=message.message_id)
        return
    elif audio:
        action = 'upload_audio'
    elif video:
        action = 'upload_video'
    elif fileobj:
        action = 'upload_document'
    elif location:
        action = 'find_location'
    bot.sendChatAction(chat_id=message.chat_id,
                       action=action)
    bot.sendMessage(message.chat_id,
                    smart_text(text),
                    reply_to_message_id=message.message_id)
| {
"content_hash": "6561a425d8ccada40fedffaa3195f778",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 69,
"avg_line_length": 33.25142857142857,
"alnum_prop": 0.5794810104829008,
"repo_name": "JokerQyou/bot",
"id": "12b16dc8d4b3c2f5e04497758e12ddb41f9316da",
"size": "5913",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "operations.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "21587"
}
],
"symlink_target": ""
} |
from django.db import models
import binascii
class HashField(models.BinaryField):
    """Indexed 16-byte hash of another model field's value.

    Stored as raw bytes; exposed to Python as an ASCII hex string via
    from_db_value, and converted back to bytes in get_prep_value.
    """

    description = ('HashField is related to some other field in a model and'
                   'stores its hashed value for better indexing performance.')

    def __init__(self, *args, **kwargs):
        # The hash is always 16 bytes; index by default, hide from forms.
        kwargs['max_length'] = 16
        kwargs.setdefault('db_index', True)
        kwargs.setdefault('editable', False)
        super(HashField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        """Drop the forced max_length so migrations stay clean.

        Django's custom-field guidance: kwargs forced in __init__ must be
        removed in deconstruct(), otherwise makemigrations re-emits them
        and a future change of the forced value breaks old migrations.
        """
        name, path, args, kwargs = super(HashField, self).deconstruct()
        kwargs.pop('max_length', None)
        return name, path, args, kwargs

    def from_db_value(self, value, expression, connection, context):
        # DB -> Python: raw bytes become an ASCII hex string; falsy stays None.
        if value:
            return binascii.hexlify(value).decode("ascii")

    def get_prep_value(self, value):
        # Python -> DB: hex string back to raw bytes.
        if value:
            return binascii.unhexlify(value)
| {
"content_hash": "2c9645488648012967efce8ed2cb26ca",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 35.3,
"alnum_prop": 0.6402266288951841,
"repo_name": "amcat/django-hash-field",
"id": "4563cc48a04909a9d4ca95de626b75baeb788cec",
"size": "706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_hash_field/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5884"
}
],
"symlink_target": ""
} |
"""
MIT License
Copyright (c) 2016 William Tumeo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .plugin import GitRepoPlugin | {
"content_hash": "4e9e06e2f8334a6df64e43eb309bcb1e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 44.48,
"alnum_prop": 0.8012589928057554,
"repo_name": "williamd1k0/shiori",
"id": "63b1838d6f816cfb1dc89c9e74159dc1a1b47785",
"size": "1113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shiori/plugins/gitrepo/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78517"
}
],
"symlink_target": ""
} |
import sys,tty,termios,time
import RPi.GPIO as GPIO
# Output pin assignments (BCM numbering -- GPIO.setmode(GPIO.BCM) in main).
LEFT_PIN = 26
RIGHT_PIN = 16
STATUS_PIN = 13
class _Getch:
    """Read a single character from stdin without waiting for Enter.

    Temporarily switches the terminal into raw mode, reads one byte, and
    restores the saved settings even if the read fails.
    """

    def __call__(self):
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
def forward():
    """Drive both output channels high."""
    GPIO.output(LEFT_PIN, True)
    # NOTE(review): the 100 ms stagger between the two sides looks
    # deliberate (soft start?) -- confirm before removing.
    time.sleep(0.1)
    GPIO.output(RIGHT_PIN, True)
def left():
    """Drive only the left channel (left high, right low)."""
    GPIO.output(LEFT_PIN, True)
    GPIO.output(RIGHT_PIN, False)
def right():
    """Drive only the right channel (left low, right high)."""
    GPIO.output(LEFT_PIN, False)
    GPIO.output(RIGHT_PIN, True)
def stop():
    """Drive both channels low."""
    GPIO.output((LEFT_PIN, RIGHT_PIN), False)
def get():
    """Read one keystroke and act on it.

    w/a/d/s map to forward/left/right/stop; 'p' quits. Returns False to
    stop the control loop, True to keep going.
    """
    inkey = _Getch()
    while True:
        key = inkey()
        if key != '':
            break
    if key == 'p':
        return False
    action = {'w': forward, 'a': left, 'd': right, 's': stop}.get(key)
    if action is not None:
        action()
    return True
def main():
    """Configure the GPIO outputs and loop on keyboard control until 'p'."""
    print "Hello Spider"
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(LEFT_PIN, GPIO.OUT)
    GPIO.setup(RIGHT_PIN, GPIO.OUT)
    GPIO.setup(STATUS_PIN, GPIO.OUT)
    # Raise the status output while the control loop runs.
    GPIO.output(STATUS_PIN, True)
    while get():
        print "Still Running"
    print "Exiting"
    GPIO.output(STATUS_PIN, False)
    GPIO.cleanup()
| {
"content_hash": "121c2c501488fde7bdd471b631bc222d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 66,
"avg_line_length": 19.408450704225352,
"alnum_prop": 0.5587808417997098,
"repo_name": "MaxxDelusional/SpiderPi",
"id": "757f2e895f44c1f4325f446bc5e35788a34f1fc0",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1814"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib.gis.db import models
from django.db import migrations
class Migration(migrations.Migration):
    """Initial migration: creates the RasterLayer and RasterTile tables."""

    dependencies = [
    ]

    operations = [
        # Metadata for an uploaded raster file and how to parse it.
        migrations.CreateModel(
            name='RasterLayer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100, null=True, blank=True)),
                ('description', models.TextField(null=True, blank=True)),
                ('datatype', models.CharField(default=b'co', max_length=2, choices=[(b'co', b'Continuous'), (b'ca', b'Categorical'), (b'ma', b'Mask'), (b'ro', b'Rank Ordered')])),
                ('rasterfile', models.FileField(upload_to=b'rasters')),
                ('srid', models.CharField(default=b'3086', max_length=10)),
                ('nodata', models.CharField(default=b'-9999', max_length=100)),
                ('parse_log', models.TextField(default=b'', null=True, editable=False, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Individual raster tiles cut from a layer's file.
        migrations.CreateModel(
            name='RasterTile',
            fields=[
                ('rid', models.AutoField(serialize=False, primary_key=True)),
                ('rast', models.RasterField(null=True, blank=True)),
                ('filename', models.TextField(null=True, blank=True)),
                ('rasterlayer', models.ForeignKey(blank=True, to='raster.RasterLayer', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "a904837c6ad84288f05e40a47ac2afab",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 179,
"avg_line_length": 40.829268292682926,
"alnum_prop": 0.548984468339307,
"repo_name": "PADAS/django-raster",
"id": "5dfc049a867f1ee9046f0ab3bb1b10c4b6fff4dd",
"size": "1698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raster/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "419"
},
{
"name": "Python",
"bytes": "161499"
}
],
"symlink_target": ""
} |
"""Calculate similarity scores based on the Jaccard distance between events."""
from __future__ import unicode_literals
from timesketch.lib import similarity
from timesketch.lib.analyzers import interface
from timesketch.lib.analyzers import manager
class SimilarityScorerConfig(object):
    """Configuration for a similarity scorer."""

    # Parameters for Jaccard and Minhash calculations.
    DEFAULT_THRESHOLD = 0.5
    DEFAULT_PERMUTATIONS = 128

    DEFAULT_CONFIG = {
        "field": "message",
        "delimiters": [" ", "-", "/"],
        "threshold": DEFAULT_THRESHOLD,
        "num_perm": DEFAULT_PERMUTATIONS,
    }

    # For any data_type that need custom config parameters.
    # TODO: Move this to its own file.
    # TODO: Add stopwords boolean config parameter.
    # TODO: Add remove_words boolean config parameter.
    CONFIG_REGISTRY = {
        "windows:evtx:record": {
            "query": 'data_type:"windows:evtx:record"',
            "field": "message",
            "delimiters": [" ", "-", "/"],
            "threshold": DEFAULT_THRESHOLD,
            "num_perm": DEFAULT_PERMUTATIONS,
        }
    }

    def __init__(self, index_name, data_type):
        """Initializes a similarity scorer config.

        Args:
            index_name: OpenSearch index name.
            data_type: Name of the data_type.
        """
        self._index_name = index_name
        self._data_type = data_type
        for k, v in self._get_config().items():
            setattr(self, k, v)

    def _get_config(self):
        """Get config for supplied data_type.

        Bug fix: the config dict is now copied before instance-specific
        keys (query/index_name/data_type) are written into it. The old
        code mutated the shared class-level DEFAULT_CONFIG and
        CONFIG_REGISTRY entries in place, leaking one instance's values
        into every later instance.

        Returns:
            Dictionary with configuration parameters.
        """
        registered = self.CONFIG_REGISTRY.get(self._data_type)
        if registered is not None:
            config_dict = dict(registered)
        else:
            # No registered config: start from the defaults and derive
            # the query from the data_type.
            config_dict = dict(self.DEFAULT_CONFIG)
            config_dict["query"] = 'data_type:"{0}"'.format(self._data_type)
        config_dict["index_name"] = self._index_name
        config_dict["data_type"] = self._data_type
        return config_dict
class SimilarityScorer(interface.BaseAnalyzer):
    """Score events based on Jaccard distance."""
    # Analyzer registration metadata consumed by the analysis manager.
    NAME = "similarity_scorer"
    DISPLAY_NAME = "Similarity Scorer"
    DESCRIPTION = (
        "Experimental: Calculate similarity scores based on the "
        "Jaccard distance between events"
    )
    # No other analyzers need to run before this one.
    DEPENDENCIES = frozenset()
    def __init__(self, index_name, sketch_id, timeline_id=None, data_type=None):
        """Initializes a similarity scorer.
        Args:
            index_name: OpenSearch index name.
            sketch_id: The ID of the sketch.
            timeline_id: The ID of the timeline.
            data_type: Name of the data_type.
        """
        # Config stays None when no data_type is given; run() bails out early
        # in that case.
        self._config = None
        if data_type:
            self._config = SimilarityScorerConfig(index_name, data_type)
        super().__init__(index_name, sketch_id, timeline_id=timeline_id)
    def run(self):
        """Entry point for the SimilarityScorer.
        Returns:
            A dict with metadata about the processed data set or None if no
            data_types has been configured.
        """
        if not self._config:
            return "No data_type specified."
        # Event generator for streaming results.
        events = self.event_stream(
            query_string=self._config.query, return_fields=[self._config.field]
        )
        # Build the LSH index and one MinHash signature per event in a single
        # pass over the stream.
        lsh, minhashes = similarity.new_lsh_index(
            events,
            field=self._config.field,
            delimiters=self._config.delimiters,
            num_perm=self._config.num_perm,
            threshold=self._config.threshold,
        )
        total_num_events = len(minhashes)
        # minhashes is keyed by (event_id, index_name) tuples.
        for key, minhash in minhashes.items():
            event_id, index_name = key
            event_dict = dict(_id=event_id, _index=index_name)
            event = interface.Event(event_dict, self.datastore)
            score = similarity.calculate_score(lsh, minhash, total_num_events)
            attributes_to_add = {"similarity_score": score}
            event.add_attributes(attributes_to_add)
            # Commit the event to the datastore.
            event.commit()
        msg = "Similarity scorer processed {0:d} events for data_type {1:s}"
        return msg.format(total_num_events, self._config.data_type)
manager.AnalysisManager.register_analyzer(SimilarityScorer)
| {
"content_hash": "b4893a9511360d720173c8e682a3fa61",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 80,
"avg_line_length": 33.84848484848485,
"alnum_prop": 0.605863921217547,
"repo_name": "google/timesketch",
"id": "99204b788843fe0565fb2353a72c3403733dd666",
"size": "5064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timesketch/lib/analyzers/similarity_scorer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "245"
},
{
"name": "Dockerfile",
"bytes": "3735"
},
{
"name": "HTML",
"bytes": "8718"
},
{
"name": "JavaScript",
"bytes": "97456"
},
{
"name": "Jupyter Notebook",
"bytes": "340247"
},
{
"name": "Makefile",
"bytes": "593"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PowerShell",
"bytes": "7120"
},
{
"name": "Python",
"bytes": "1859758"
},
{
"name": "SCSS",
"bytes": "17377"
},
{
"name": "Shell",
"bytes": "22830"
},
{
"name": "Vue",
"bytes": "584825"
}
],
"symlink_target": ""
} |
import os
from importlib import import_module
from databuild.loader import load_classpath
from databuild.buildfile import BuildFile
from databuild.utils import multiglob
class Builder(object):
    """Drives a data build: loads the settings module, instantiates the
    adapter it names, and applies build files to the resulting book."""

    def __init__(self, settings='databuild.settings'):
        """Import *settings* (a dotted module path) and construct the
        adapter class named by its ADAPTER attribute."""
        super(Builder, self).__init__()
        self.settings = import_module(settings)
        adapter_cls = load_classpath(self.settings.ADAPTER)
        self.book = adapter_cls(settings=self.settings)

    def build(self, build_file_or_dir, echo=False):
        """Apply a single build file, or every *.json/*.yaml/*.yml file in
        a directory (in sorted order), to the book.

        Returns the book so callers can chain off the result."""
        self.book.echo = echo
        if not os.path.isfile(build_file_or_dir):
            # Directory: gather every supported build-file extension.
            patterns = tuple(
                os.path.join(build_file_or_dir, pattern)
                for pattern in ("*.json", "*.yaml", "*.yml")
            )
            build_files = map(BuildFile, sorted(multiglob(*patterns)))
        else:
            build_files = [BuildFile(build_file_or_dir)]
        self.book.apply_operations(build_files, echo=echo)
        return self.book
| {
"content_hash": "341b6b72ca41d11e8891cfa74cc4c317",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 67,
"avg_line_length": 35.55172413793103,
"alnum_prop": 0.6188166828322017,
"repo_name": "databuild/databuild",
"id": "0ee7c6d4641f49a96850aadf5d7cbbcf07da6d69",
"size": "1031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "databuild/builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "72938"
},
{
"name": "Shell",
"bytes": "6707"
}
],
"symlink_target": ""
} |
"""
"""
from __future__ import unicode_literals, absolute_import
from autobit import config
from autobit import ConfigError
from autobit.tracker import Tracker
from autobit.classification import MediaType
class BroadcastTheNet(Tracker):
    """Tracker implementation for broadcasthe.net (BTN)."""

    def __init__(self):
        self.name = "btn"
        # 32-character API credentials, loaded from config in reconfigure().
        self._authkey = ""
        self._passkey = ""
        super().__init__()

    def parse_line(self, message: str):
        """Parse an announce line of " | "-separated fields into a release.

        The release name is the last field; the torrent id is field 10.
        """
        pcs = message.split(" | ")
        rls_name = pcs[-1]
        torrent_id = pcs[10]
        return self.make_release(rls_name, torrent_id, MediaType.TV)

    def download(self, release) -> bytes:
        """Fetch the .torrent payload for *release* using the stored keys."""
        url = "https://broadcasthe.net/torrents.php?action=download&id={}&authkey={}&torrent_pass={}".format(
            release.torrent_id, self._authkey, self._passkey
        )
        return self._fetch(url)

    def reconfigure(self):
        """Reload credentials from config and enable/disable accordingly."""
        try:
            self._authkey = config['BTN_AUTHKEY']
            self._passkey = config['BTN_PASSKEY']
            # BUG FIX: the original check was inverted -- it raised
            # ConfigError (disabling the tracker) precisely when both
            # 32-character keys were present and valid. Raise only when
            # validation fails.
            if not all([self._authkey, self._passkey,
                        len(self._authkey) == 32, len(self._passkey) == 32]):
                raise ConfigError()
        except (KeyError, ConfigError):
            self.disable()
        else:
            self.enable()

    def upload(self, release_name, torrent_file) -> bool:
        """Uploading is not supported on this tracker."""
        raise NotImplementedError()
| {
"content_hash": "e75a708a19b8584b209c4a039ce2605f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 109,
"avg_line_length": 31.214285714285715,
"alnum_prop": 0.5949656750572082,
"repo_name": "leighmacdonald/autobit",
"id": "d7e510c6c7caf0695c05d43863a178e3c288f98d",
"size": "1335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autobit/tracker/btn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45393"
}
],
"symlink_target": ""
} |
import os
import sc2reader
import Player
import traceback
class GameProcessor:
    """Finds StarCraft II replay files under a directory tree and extracts
    per-player control-group ("get from hotkey") usage frequencies.

    Frequencies are normalized by game length in seconds and wrapped in
    Player.Player objects.
    """

    # SC2 replays are recorded at 16 frames per second.
    CONST_FPS = 16.0
    CONST_EXT = '.SC2Replay'

    def __init__(self, path="", exclude=""):
        """Create a processor rooted at *path*.

        The original class defined two __init__ methods; only the second
        (path, exclude) one existed at runtime, so they are merged here
        with defaults for backward compatibility.

        Args:
            path: directory to scan for replays.
            exclude: a file path that must never be processed; it is
                pre-seeded into the processed map.
        """
        self.path = path
        self.files = list()
        # BUG FIX: the live __init__ misspelled this attribute as 'exlude';
        # the canonical name (used by the other constructor) is 'exclude'.
        self.exclude = exclude
        self.processed = dict()
        self.processed[exclude] = 1
        print("GameProcessor created with path: " + path)

    def findFiles(self):
        """Recursively collect every *.SC2Replay file under self.path."""
        #each dirpath has subfolders and files
        #each subfolder will get its own "dirpath"
        #this will find each file
        for dirpath, dirnames, filenames in os.walk(self.path):
            for filename in filenames:
                (root, ext) = os.path.splitext(filename)
                #We only care about files ending in .SC2Replay
                if ext == GameProcessor.CONST_EXT:
                    self.files.append(os.path.join(dirpath, filename))

    #Takes a possible queue for progress updates, this is useful if the code
    #using this is running in a separate thread from the main stuff
    def processFiles(self, progQueue=None):
        """Process all not-yet-processed replays; return a list of Players.

        progQueue is accepted for interface parity with processFiles_mp but
        is currently unused here.
        """
        players = list()
        fileCount = len(self.files)
        for filepath in self.files:
            if filepath not in self.processed:
                self.processed[filepath] = 1
                # BUG FIX: the replay was never loaded here, so 'curReplay'
                # was an unbound name (NameError on the first iteration).
                curReplay = sc2reader.load_replay(filepath)
                total_time = curReplay.frames/GameProcessor.CONST_FPS
                freqDists = dict()
                print(curReplay.filename)
                for player in curReplay.players:
                    freqDists[player.uid] = [0]*10
                    print(player.uid)
                #This for loop takes on the order of 10^4 iterations per
                #replay, but it is not a performance bottleneck
                for event in curReplay.events:
                    #Notice we check that the event was spawned by an actual
                    #player
                    if event.name == 'GetFromHotkeyEvent' and event.control_group < 10 and event.pid in freqDists.keys():
                        (freqDists[event.pid])[event.control_group] = (freqDists[event.pid])[event.control_group]+1
                for key in freqDists.keys():
                    freqDists[key] = [freq/total_time for freq in freqDists[key]]
                    player = Player.Player(lookupName(key, curReplay), freqDists[key], lookupRace(key, curReplay), curReplay.map_name, curReplay.filename)
                    players.append(player)
        return players

    #Multiprocessing version of processFiles
    def processFiles_mp(self, progQueue, errqueue):
        """Like processFiles, but reports progress ratios on progQueue and
        pushes load failures onto errqueue.

        AttributeError load failures skip just that file; any other failure
        aborts the batch and returns None.
        """
        players = list()
        fileCount = len(self.files)
        for filepath in self.files:
            if filepath not in self.processed:
                self.processed[filepath] = 1
                try:
                    curReplay = sc2reader.load_replay(filepath)
                except Exception as e:
                    progQueue.put('FATAL')
                    errqueue.put([filepath, e, traceback.format_exc()])
                    #This file was bad
                    if isinstance(e, AttributeError):
                        progQueue.put(str(100.0/fileCount))
                        continue
                    else:
                        return None
                total_time = curReplay.frames/GameProcessor.CONST_FPS
                freqDists = dict()
                print(curReplay.filename)
                for player in curReplay.players:
                    freqDists[player.uid] = [0]*10
                    print(player.uid)
                #This for loop takes on the order of 10^4 iterations per
                #replay, but it is not a performance bottleneck
                for event in curReplay.events:
                    #Notice we check that the event was spawned by an actual
                    #player
                    if event.name == 'GetFromHotkeyEvent' and event.control_group < 10 and event.pid in freqDists.keys():
                        (freqDists[event.pid])[event.control_group] = (freqDists[event.pid])[event.control_group]+1
                for key in freqDists.keys():
                    freqDists[key] = [freq/total_time for freq in freqDists[key]]
                    player = Player.Player(lookupName(key, curReplay), freqDists[key], lookupRace(key, curReplay), curReplay.map_name, curReplay.filename)
                    players.append(player)
                progQueue.put(str(100.0/fileCount))
        return players
def processFile(singlefile):
    """Extract per-player control-group usage rates from one replay file.

    Returns a list of Player.Player objects, one per player, with each
    frequency distribution normalized by game length in seconds.
    """
    replay = sc2reader.load_replay(singlefile)
    seconds = replay.frames / GameProcessor.CONST_FPS
    # One 10-slot counter list per player uid.
    counts = {p.uid: [0] * 10 for p in replay.players}
    for ev in replay.events:
        # Only tally "get from hotkey" events raised by an actual player.
        if ev.name == 'GetFromHotkeyEvent' and ev.control_group < 10 and ev.pid in counts:
            counts[ev.pid][ev.control_group] += 1
    results = []
    for uid in counts.keys():
        rates = [c / seconds for c in counts[uid]]
        counts[uid] = rates
        results.append(Player.Player(
            lookupName(uid, replay), rates, lookupRace(uid, replay),
            replay.map_name, replay.filename))
    return results
#We implement these functions because looking up player names by uid may be
#janky in a future version of sc2reader
def lookupName(uid, replay):
    """Return the name of the player with *uid* in *replay*.

    Yields None when no player matches, mirroring the original
    implicit-return behaviour.
    """
    return next((p.name for p in replay.players if p.uid == uid), None)
def lookupRace(uid, replay):
    """Return the picked race of the player with *uid* in *replay*.

    Yields None when no player matches, mirroring the original
    implicit-return behaviour.
    """
    return next((p.pick_race for p in replay.players if p.uid == uid), None)
| {
"content_hash": "71aa6762356e491e981603bdc1402353",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 154,
"avg_line_length": 44.96212121212121,
"alnum_prop": 0.5745577085088458,
"repo_name": "eqy/vroMAD",
"id": "dffadac6639d23ed58a5fdd18de0379fc2e577de",
"size": "5935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GameProcessor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23825"
}
],
"symlink_target": ""
} |
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.saved_model.loader import load
from tensorflow.python.saved_model.loader import maybe_saved_model_directory | {
"content_hash": "3cdf90fcc05d57889b66fe3b5b9fa3ac",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 76,
"avg_line_length": 40.142857142857146,
"alnum_prop": 0.8078291814946619,
"repo_name": "ryfeus/lambda-packs",
"id": "80f566aded94c6ecfecf065db3467e27cdd62314",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/saved_model/loader/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from get_data import get_data
import os
import xml.etree.ElementTree as ET
import collections
# FMI API key read from the environment at import time; raises KeyError
# immediately if FMI_API_KEY is not set.
FMI_API_KEY = os.environ['FMI_API_KEY']
# Stored-query URL for simple current weather observations; the caller
# appends the place name to this base URL.
weather_now_url = "http://data.fmi.fi/fmi-apikey/" + FMI_API_KEY + "/wfs?request=getFeature&storedquery_id=fmi::observations::weather::simple&place="
def parse_weather_data(location):
    """Fetch current weather observations for *location* from the FMI API
    and return an OrderedDict mapping parameter names to their values
    (insertion order follows document order)."""
    raw_xml = get_data(weather_now_url, location, "get_weather_data")
    root = ET.fromstring(raw_xml)
    WFS_NS = '{http://www.opengis.net/wfs/2.0}'
    BSWFS_NS = '{http://xml.fmi.fi/schema/wfs/2.0}'
    observations = collections.OrderedDict()
    for member in root.iter(WFS_NS + 'member'):
        for element in member.iter(BSWFS_NS + 'BsWfsElement'):
            # Each element is expected to carry one name/value pair; as in
            # the original, the last occurrence wins.
            for name_node in element.iter(BSWFS_NS + 'ParameterName'):
                parameter_name = name_node.text
            for value_node in element.iter(BSWFS_NS + 'ParameterValue'):
                parameter_value = value_node.text
            observations[parameter_name] = parameter_value
    return observations
| {
"content_hash": "21a225fdf9505881501b7565166de64a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 149,
"avg_line_length": 36.172413793103445,
"alnum_prop": 0.6930409914204004,
"repo_name": "Mikrobitti/6-17-saabotti",
"id": "b241dba8bc13726e3a0841623744ab378efe16b9",
"size": "1274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parse_weather_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4205"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Packaging metadata for the django-email-from-template distribution.
setup(
    name='django-email-from-template',
    version='2.4.1',
    description="Send emails generated entirely from Django templates.",
    url='https://chris-lamb.co.uk/projects/django-email-from-template',
    author="Chris Lamb",
    author_email='chris@chris-lamb.co.uk',
    license="BSD",
    # Ship every discovered package plus the bundled template files.
    packages=find_packages(),
    package_data={'': ['templates/*/*']},
    install_requires=(
        'Django>=1.8',
    ),
)
| {
"content_hash": "8cbe8593f72f2db9a850b36c0b51d4a3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 72,
"avg_line_length": 23.285714285714285,
"alnum_prop": 0.623721881390593,
"repo_name": "lamby/django-email-from-template",
"id": "d0630924aba5ea793a1fb30bfd3d32e1d28833b9",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11355"
}
],
"symlink_target": ""
} |
import os
import sys
import tempfile
from zipfile import ZipFile, ZIP_DEFLATED
from datetime import datetime
import time
from jinja2 import Environment, FileSystemLoader
from . import Color
# When running under a frozen bundle (e.g. PyInstaller), data files are
# unpacked beneath sys._MEIPASS; otherwise resolve relative to this module.
if getattr(sys, "frozen", False):
    _basedir = os.path.join(sys._MEIPASS, "pyexcelerate")
else:
    _basedir = os.path.dirname(__file__)
# Directory containing the Jinja2 templates for the xlsx XML parts.
_TEMPLATE_PATH = os.path.join(_basedir, "templates")
class Writer(object):
    """Serializes a Workbook to an .xlsx (zip-of-XML) archive.

    XML parts are rendered with Jinja2 templates loaded once at class
    creation; worksheet XML is streamed part-by-part so large sheets do
    not have to be materialized in memory.
    """

    env = Environment(loader=FileSystemLoader(_TEMPLATE_PATH), auto_reload=False)
    _docProps_app_template = env.get_template("docProps/app.xml")
    _docProps_core_template = env.get_template("docProps/core.xml")
    _content_types_template = env.get_template("[Content_Types].xml")
    _rels_template = env.get_template("_rels/.rels")
    _styles_template = env.get_template("xl/styles.xml")
    _empty_styles_template = env.get_template("xl/styles.empty.xml")
    _workbook_template = env.get_template("xl/workbook.xml")
    _workbook_rels_template = env.get_template("xl/_rels/workbook.xml.rels")
    _worksheet_template = env.get_template("xl/worksheets/sheet.xml")
    _vbaProject_bin_file = os.path.join(_TEMPLATE_PATH, "xl/vbaProject.bin")

    __slots__ = ("workbook",)

    def __init__(self, workbook):
        self.workbook = workbook

    def _render_template_wb(self, template, extra_context=None):
        """Render *template* with the workbook (plus extra_context) and
        return the result as UTF-8 encoded bytes."""
        context = {"workbook": self.workbook}
        if extra_context:
            context.update(extra_context)
        return template.render(context).encode("utf-8")

    def _get_utc_now(self):
        """Current UTC time formatted for docProps/core.xml (seconds
        truncated to 00)."""
        now = datetime.utcnow()
        return now.strftime("%Y-%m-%dT%H:%M:00Z")

    def save(self, file, **kwargs):
        """Write the workbook to *file* (a path or file object) as xlsx.

        Extra keyword arguments are forwarded to zipfile.ZipFile.
        """
        # BUG FIX: the archive is now managed with a context manager so the
        # handle is closed (and the zip central directory flushed) even when
        # rendering or writing raises mid-save; the original leaked the
        # handle on error.
        with ZipFile(file, "w", ZIP_DEFLATED, **kwargs) as zf:
            zf.writestr(
                "docProps/app.xml", self._render_template_wb(self._docProps_app_template)
            )
            zf.writestr(
                "docProps/core.xml",
                self._render_template_wb(
                    self._docProps_core_template, {"date": self._get_utc_now()}
                ),
            )
            zf.writestr(
                "[Content_Types].xml",
                self._render_template_wb(self._content_types_template),
            )
            zf.writestr("_rels/.rels", self._rels_template.render().encode("utf-8"))
            if self.workbook.has_styles:
                zf.writestr(
                    "xl/styles.xml", self._render_template_wb(self._styles_template)
                )
            else:
                zf.writestr(
                    "xl/styles.xml", self._render_template_wb(self._empty_styles_template)
                )
            if self.workbook.has_macros:
                zf.write(self._vbaProject_bin_file, "xl/vbaProject.bin")
            zf.writestr(
                "xl/workbook.xml", self._render_template_wb(self._workbook_template)
            )
            zf.writestr(
                "xl/_rels/workbook.xml.rels",
                self._render_template_wb(self._workbook_rels_template),
            )
            for index, sheet in self.workbook.get_xml_data():
                # Stream the worksheet XML chunk-by-chunk.
                sheetStream = self._worksheet_template.generate({"worksheet": sheet})
                try:
                    with zf.open("xl/worksheets/sheet%s.xml" % (index), mode="w", force_zip64=True) as f:
                        for s in sheetStream:
                            f.write(s.encode("utf-8"))
                except RuntimeError as e:
                    # Fallback for zipfile implementations that cannot stream
                    # into an open member: render the remaining chunks to a
                    # temp file, then add it whole.
                    print("received error when writing zip file", e)
                    tfd, tfn = tempfile.mkstemp()
                    # fdopen wrapped in 'with' so the temp handle cannot leak
                    # if a render chunk raises.
                    with os.fdopen(tfd, "wb") as tf:
                        for s in sheetStream:
                            tf.write(s.encode("utf-8"))
                    zf.write(tfn, "xl/worksheets/sheet%s.xml" % (index))
                    os.remove(tfn)
| {
"content_hash": "218f21dde51fa56f9301fc4d8ee60b80",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 101,
"avg_line_length": 39.68478260869565,
"alnum_prop": 0.5897014516570802,
"repo_name": "kz26/PyExcelerate",
"id": "8302037892ce397e1167601ad092200cccd250d5",
"size": "3651",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pyexcelerate/Writer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "90006"
}
],
"symlink_target": ""
} |
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import unittest
import geojson
class FeaturesTest(unittest.TestCase):
    # These tests pin geojson's encoder output verbatim; the expected JSON
    # strings rely on dumps(..., sort_keys=True) producing stable key order.
    def test_protocol(self):
        """
        A dictionary can satisfy the protocol
        """
        f = {
            'type': 'Feature',
            'id': '1',
            'geometry': {'type': 'Point', 'coordinates': [53.0, -4.0]},
            'properties': {'title': 'Dict 1'},
        }
        json = geojson.dumps(f, sort_keys=True)
        self.assertEqual(json, '{"geometry":'
                               ' {"coordinates": [53.0, -4.0],'
                               ' "type": "Point"},'
                               ' "id": "1",'
                               ' "properties": {"title": "Dict 1"},'
                               ' "type": "Feature"}')
        # Round-trip: decoding then re-encoding must be stable.
        o = geojson.loads(json)
        output = geojson.dumps(o, sort_keys=True)
        self.assertEqual(output, '{"geometry":'
                                 ' {"coordinates": [53.0, -4.0],'
                                 ' "type": "Point"},'
                                 ' "id": "1",'
                                 ' "properties": {"title": "Dict 1"},'
                                 ' "type": "Feature"}')
    def test_unicode_properties(self):
        # Load a fixture with non-ASCII properties and ensure dump succeeds.
        with open("tests/data.geojson") as file_:
            obj = geojson.load(file_)
        geojson.dump(obj, StringIO())
    def test_feature_class(self):
        """
        Test the Feature class
        """
        from geojson.examples import SimpleWebFeature
        feature = SimpleWebFeature(
            id='1',
            geometry={'type': 'Point', 'coordinates': [53.0, -4.0]},
            title='Feature 1', summary='The first feature',
            link='http://example.org/features/1'
        )
        # It satisfies the feature protocol
        self.assertEqual(feature.id, '1')
        self.assertEqual(feature.properties['title'], 'Feature 1')
        self.assertEqual(feature.properties['summary'], 'The first feature')
        self.assertEqual(feature.properties['link'],
                         'http://example.org/features/1')
        self.assertEqual(geojson.dumps(feature.geometry, sort_keys=True),
                         '{"coordinates": [53.0, -4.0], "type": "Point"}')
        # Encoding
        json = ('{"geometry": {"coordinates": [53.0, -4.0],'
                ' "type": "Point"},'
                ' "id": "1",'
                ' "properties":'
                ' {"link": "http://example.org/features/1",'
                ' "summary": "The first feature",'
                ' "title": "Feature 1"},'
                ' "type": "Feature"}')
        self.assertEqual(geojson.dumps(feature, sort_keys=True), json)
        # Decoding
        factory = geojson.examples.create_simple_web_feature
        json = ('{"geometry": {"type": "Point",'
                ' "coordinates": [53.0, -4.0]},'
                ' "id": "1",'
                ' "properties": {"summary": "The first feature",'
                ' "link": "http://example.org/features/1",'
                ' "title": "Feature 1"}}')
        feature = geojson.loads(json, object_hook=factory)
        self.assertEqual(repr(type(feature)),
                         "<class 'geojson.examples.SimpleWebFeature'>")
        self.assertEqual(feature.id, '1')
        self.assertEqual(feature.properties['title'], 'Feature 1')
        self.assertEqual(feature.properties['summary'], 'The first feature')
        self.assertEqual(feature.properties['link'],
                         'http://example.org/features/1')
        self.assertEqual(geojson.dumps(feature.geometry, sort_keys=True),
                         '{"coordinates": [53.0, -4.0], "type": "Point"}')
    def test_geo_interface(self):
        # Any object exposing __geo_interface__ should be encodable.
        class Thingy(object):
            def __init__(self, id, title, x, y):
                self.id = id
                self.title = title
                self.x = x
                self.y = y
            @property
            def __geo_interface__(self):
                return ({"id": self.id,
                         "properties": {"title": self.title},
                         "geometry": {"type": "Point",
                                      "coordinates": (self.x, self.y)}})
        ob = Thingy('1', 'thingy one', -106.0, 40.0)
        self.assertEqual(geojson.dumps(ob.__geo_interface__['geometry'],
                                       sort_keys=True),
                         '{"coordinates": [-106.0, 40.0], "type": "Point"}')
        self.assertEqual(geojson.dumps(ob, sort_keys=True),
                         ('{"geometry": {"coordinates": [-106.0, 40.0],'
                          ' "type": "Point"},'
                          ' "id": "1",'
                          ' "properties": {"title": "thingy one"}}'))
| {
"content_hash": "77495aede992c184f61aa003edb2b1f6",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 76,
"avg_line_length": 40.57142857142857,
"alnum_prop": 0.4587821043910522,
"repo_name": "frewsxcv/python-geojson",
"id": "e2b56e9bd8f86b2066207b24e662f6b023e64c89",
"size": "4828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_features.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53414"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
from django.core.exceptions import ValidationError
from django_social import Source
from django_social.facebook.api import get_location_info
from django_social.facebook.api import search_places
class SocialTestCase(unittest.TestCase):
    # NOTE(review): these tests call the live Facebook Graph API, so they
    # need network access (and valid API credentials in settings) to pass,
    # and the asserted venue data can drift over time.
    def test_facebook_get_location_by_id(self):
        """
        Test for getting a location from facebook by it's ID.
        """
        location = get_location_info(location_id=120491747748)
        self.assertEqual(location.name, 'The Classic Cup')
        self.assertEqual(location.category, 'Restaurant/cafe')
        self.assertEqual(location.country, 'United States')
        self.assertEqual(location.latlong, (39.042173020445, -94.590903251913))
        self.assertEqual(location.line1, '301 W. 47th Street')
        self.assertEqual(location.line2, None)
        self.assertEqual(location.locality, 'Kansas City')
        self.assertEqual(location.phone, '816-753-1840')
        self.assertEqual(location.postal_code, '64112')
        self.assertEqual(location.source, Source.FACEBOOK)
        self.assertEqual(location.subdivision, 'MO')
    def test_places_search(self):
        """
        Test multiple places search from facebook.
        """
        # Calling search_places with no arguments must be rejected.
        with self.assertRaises(ValidationError) as e:
            search_places()
        self.assertEqual([u'One of the following args must be provided: query, latitude and longitude, or distance.'],
                         e.exception.messages)
        # Sometimes facebook gives back incorrect page sizes. If I ask for 6,
        # I don't always get 6.
        places = search_places(query='coffee',
                               latitude=39.042173020445,
                               longitude=-94.590903251913,
                               distance=1000,
                               page_size=6)
        self.assertTrue(len(places) > 1)
| {
"content_hash": "82a6573f8b64e5b8bf9f6865f2754dbf",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 118,
"avg_line_length": 39.916666666666664,
"alnum_prop": 0.6383089770354906,
"repo_name": "InfoAgeTech/django-social",
"id": "8d0c95a31362c0f90bf82379a73a75fdc2c2abdc",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_social/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17872"
}
],
"symlink_target": ""
} |
import os
import sys
import xbmc
import urllib,urllib2
import xbmcvfs
import xbmcaddon
import xbmcgui,xbmcplugin,shutil
from zipfile import ZipFile
from cStringIO import StringIO
import uuid
# Kodi addon metadata, localization helper and filesystem locations,
# resolved once at import time from the addon manifest.
__addon__ = xbmcaddon.Addon()
__author__ = __addon__.getAddonInfo('author')
__scriptid__ = __addon__.getAddonInfo('id')
__scriptname__ = __addon__.getAddonInfo('name')
__version__ = __addon__.getAddonInfo('version')
__language__ = __addon__.getLocalizedString
__cwd__ = xbmc.translatePath( __addon__.getAddonInfo('path') ).decode("utf-8")
__profile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') ).decode("utf-8")
__resource__ = xbmc.translatePath( os.path.join( __cwd__, 'resources', 'lib' ) ).decode("utf-8")
# Scratch directory for downloaded/extracted subtitle files (recreated on
# every download in Download()).
__temp__ = xbmc.translatePath( os.path.join( __profile__, 'temp', '') ).decode("utf-8")
# Make the bundled helper modules (pn_utilities etc.) importable.
sys.path.append (__resource__)
from pn_utilities import PNServer, log, OpensubtitlesHash, normalizeString, languageTranslate, calculateSublightHash
def Search( item ):
    """Query the Podnapisi server for subtitles matching *item* and add one
    Kodi directory entry per result.

    item is the dict built by the 'search' dispatch branch below; this
    function adds 'OShash'/'SLhash' keys to it in place.
    """
    pn_server = PNServer()
    pn_server.Create()
    subtitles_list = []
    if item['temp'] :
        # Streamed/temporary sources cannot be hashed; use placeholders.
        item['OShash'] = "000000000000"
        item['SLhash'] = "000000000000"
    else:
        item['OShash'] = OpensubtitlesHash(item)
        item['SLhash'] = calculateSublightHash(item['file_original_path'])
        log( __scriptid__ ,"xbmc module OShash")
    log( __scriptid__ ,"Search for [%s] by name" % (os.path.basename( item['file_original_path'] ),))
    subtitles_list = pn_server.SearchSubtitlesWeb(item)
    if subtitles_list:
        for it in subtitles_list:
            listitem = xbmcgui.ListItem(label=it["language_name"],
                                        label2=it["filename"],
                                        iconImage=it["rating"],
                                        thumbnailImage=it["language_flag"]
                                        )
            listitem.setProperty( "sync", ("false", "true")[it["sync"]] )
            listitem.setProperty( "hearing_imp", ("false", "true")[it["hearing_imp"]] )
            # Each entry links back into this plugin's 'download' action with
            # everything needed to fetch that specific subtitle.
            url = "plugin://%s/?action=download&link=%s&filename=%s&movie_id=%s&season=%s&episode=%s&hash=%s&match=%s" %(__scriptid__,
                                                                  it["link"],
                                                                  it["filename"],
                                                                  it["movie_id"],
                                                                  it["season"],
                                                                  it["episode"],
                                                                  item['OShash'],
                                                                  it["sync"]
                                                                  )
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=listitem,isFolder=False)
def Download(params):
    """Download and extract the subtitle archive described by *params*.

    Recreates the __temp__ scratch directory, fetches the zip from the
    Podnapisi server, and returns a list of extracted subtitle file paths.
    """
    if xbmcvfs.exists(__temp__):
        shutil.rmtree(__temp__)
    xbmcvfs.mkdirs(__temp__)
    subtitle_list = []
    pn_server = PNServer()
    pn_server.Create()
    url = pn_server.Download(params)
    try:
        log( __scriptid__ ,"Extract using 'ZipFile' method")
        response = urllib2.urlopen(url)
        raw = response.read()
        archive = ZipFile(StringIO(raw), 'r')
        files = archive.namelist()
        files.sort()
        index = 1
        for file in files:
            contents = archive.read(file)
            extension = file[file.rfind('.') + 1:]
            # Random names avoid clashes; multi-file archives get an index.
            if len(files) == 1:
                dest = os.path.join(__temp__, "%s.%s" %(str(uuid.uuid4()), extension))
            else:
                dest = os.path.join(__temp__, "%s.%d.%s" %(str(uuid.uuid4()), index, extension))
            f = open(dest, 'wb')
            f.write(contents)
            f.close()
            subtitle_list.append(dest)
            index += 1
    except:
        # NOTE(review): bare except deliberately falls back to Kodi's own
        # extractor for any ZipFile failure, but it also hides unrelated
        # errors (network, filesystem) -- consider narrowing.
        log( __scriptid__ ,"Extract using 'XBMC.Extract' method")
        exts = [".srt", ".sub", ".txt", ".smi", ".ssa", ".ass" ]
        zip = os.path.join( __temp__, "PN.zip")
        f = urllib.urlopen(url)
        with open(zip, "wb") as subFile:
            subFile.write(f.read())
            subFile.close()
        xbmc.sleep(500)
        xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (zip,__temp__,)).encode('utf-8'), True)
        # Collect every extracted file with a known subtitle extension.
        for subfile in xbmcvfs.listdir(zip)[1]:
            file = os.path.join(__temp__, subfile.decode('utf-8'))
            if (os.path.splitext( file )[1] in exts):
                subtitle_list.append(file)
    return subtitle_list
def get_params(string=""):
    """Parse a Kodi plugin query string ("?k1=v1&k2=v2") into a dict.

    Args:
        string: query string to parse; when empty, sys.argv[2] (the
            parameter string Kodi passes to the plugin) is used instead.

    Returns:
        Dict mapping parameter names to (still URL-encoded) values.
        Segments without exactly one '=' are skipped. For inputs shorter
        than 2 characters an empty list is returned, preserving the
        original function's quirk for callers that test truthiness.
    """
    param = []
    paramstring = sys.argv[2] if string == "" else string
    if len(paramstring) >= 2:
        # Drop the leading '?'. (The original also truncated a trailing
        # '/' into a dead local variable that was never used; that no-op
        # has been removed -- values keep any trailing slash, as before.)
        cleanedparams = paramstring.replace('?', '')
        param = {}
        for pair in cleanedparams.split('&'):
            splitparams = pair.split('=')
            if len(splitparams) == 2:
                param[splitparams[0]] = splitparams[1]
    return param
# Plugin entry point: Kodi invokes this script with the handle in
# sys.argv[1] and the query string in sys.argv[2]; dispatch on 'action'.
params = get_params()
if params['action'] == 'search':
    log( __scriptid__, "action 'search' called")
    # Collect everything the subtitle search needs from the running player.
    item = {}
    item['temp'] = False
    item['rar'] = False
    item['year'] = xbmc.getInfoLabel("VideoPlayer.Year") # Year
    item['season'] = str(xbmc.getInfoLabel("VideoPlayer.Season")) # Season
    item['episode'] = str(xbmc.getInfoLabel("VideoPlayer.Episode")) # Episode
    item['tvshow'] = normalizeString(xbmc.getInfoLabel("VideoPlayer.TVshowtitle")) # Show
    item['title'] = normalizeString(xbmc.getInfoLabel("VideoPlayer.OriginalTitle"))# try to get original title
    item['file_original_path'] = urllib.unquote(xbmc.Player().getPlayingFile().decode('utf-8'))# Full path of a playing file
    item['3let_language'] = [] #['scc','eng']
    # Convert the requested language list to 3-letter codes.
    for lang in urllib.unquote(params['languages']).decode('utf-8').split(","):
        item['3let_language'].append(languageTranslate(lang,0,1))
    if item['title'] == "":
        log( __scriptid__, "VideoPlayer.OriginalTitle not found")
        item['title'] = normalizeString(xbmc.getInfoLabel("VideoPlayer.Title")) # no original title, get just Title
    if item['episode'].lower().find("s") > -1: # Check if season is "Special"
        item['season'] = "0" #
        item['episode'] = item['episode'][-1:]
    # Normalize the source path for streamed, rar- and stack-based media.
    if ( item['file_original_path'].find("http") > -1 ):
        item['temp'] = True
    elif ( item['file_original_path'].find("rar://") > -1 ):
        item['rar'] = True
        item['file_original_path'] = os.path.dirname(item['file_original_path'][6:])
    elif ( item['file_original_path'].find("stack://") > -1 ):
        stackPath = item['file_original_path'].split(" , ")
        item['file_original_path'] = stackPath[0][8:]
    Search(item)
elif params['action'] == 'download':
    # Extract the chosen archive and expose each subtitle file to Kodi.
    subs = Download(params)
    for sub in subs:
        listitem = xbmcgui.ListItem(label=sub)
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=sub,listitem=listitem,isFolder=False)
elif params['action'] == 'manualsearch':
    # Manual search is not supported; show a localized notification.
    xbmc.executebuiltin(u'Notification(%s,%s,2000,%s)' %(__scriptname__,
                                                         __language__(32004),
                                                         os.path.join(__cwd__,"icon.png")
                                                         )
                        )
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| {
"content_hash": "df014e9a5ad5b06125f94239647dc1b3",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 129,
"avg_line_length": 39.28358208955224,
"alnum_prop": 0.5073454913880446,
"repo_name": "aplicatii-romanesti/allinclusive-kodi-pi",
"id": "ac8a7b0f6855a5621473797cbf7b3ebec4a1d1c9",
"size": "7922",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": ".kodi/addons/service.subtitles.podnapisi/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6178"
},
{
"name": "Python",
"bytes": "8657978"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
"""
The resume module
=================
This module defines - among other things - the views responsible for exposing personal
information.
"""
def init_app(app, **kwargs):
    """ Performs app-initialization operations related to the current module. """
    # **kwargs is accepted for interface compatibility with other modules'
    # init_app hooks; it is currently unused.
    # Registers blueprints.
    # (Imported lazily, once an app exists, to avoid circular imports at
    # package import time.)
    from . import views
    app.register_blueprint(views.resume_blueprint)
    # Registers context processors
    from . import context_processors
    app.context_processor(context_processors.google_metadata)
| {
"content_hash": "4d25ddf67433c4ed5f353637ca2ef7fb",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 90,
"avg_line_length": 27,
"alnum_prop": 0.6881091617933723,
"repo_name": "ellmetha/morganaubert-resume",
"id": "aa4ee8fd76aa61d159b0865d282204e2e2304b55",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "main/modules/resume/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "34455"
},
{
"name": "JavaScript",
"bytes": "12441"
},
{
"name": "Makefile",
"bytes": "3663"
},
{
"name": "Python",
"bytes": "11149"
},
{
"name": "SCSS",
"bytes": "13220"
},
{
"name": "Shell",
"bytes": "56"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from .state import State
from ..messages.append_entries import AppendEntriesMessage
class Leader(State):
    """Raft leader state: replicates log entries to followers and emits
    heartbeats to assert leadership.
    """

    def __init__(self):
        # Per-follower index of the next log entry to send.
        self._nextIndexes = defaultdict(int)
        # Per-follower index of the highest log entry known to be replicated.
        self._matchIndex = defaultdict(int)

    def set_sever(self, server):
        """Attach this state to *server*, announce leadership with a
        heartbeat, and initialize per-follower replication bookkeeping.

        NOTE: the method name is a historical typo ("sever") kept for
        backward compatibility with existing callers.
        """
        # Bug fix: the original assigned ``self._sever`` only, leaving
        # ``self._server`` unset even though _send_heart_beat() (called on
        # the next line) and every other method read ``self._server``.
        # ``_sever`` is still set so any external reader of the old
        # attribute keeps working.
        self._server = server
        self._sever = server
        self._send_heart_beat()

        for n in self._server._neighbors:
            self._nextIndexes[n._name] = self._server._lastLogIndex + 1
            self._matchIndex[n._name] = 0

    def on_response_received(self, message):
        """Handle a follower's AppendEntries response.

        Returns ``(self, None)``: the leader stays in this state and any
        retry is sent out-of-band via _send_response_message().
        """
        # Was the last AppendEntries good?
        if not message.data["response"]:
            # No: back up this follower's log index by one and retry with
            # the preceding entry.
            self._nextIndexes[message.sender] -= 1

            # Get the next log entry to send to the client.
            previousIndex = max(0, self._nextIndexes[message.sender] - 1)
            previous = self._server._log[previousIndex]
            current = self._server._log[self._nextIndexes[message.sender]]

            # Send the new log to the client and wait for it to respond.
            appendEntry = AppendEntriesMessage(
                self._server._name,
                message.sender,
                self._server._currentTerm,
                {
                    "leaderId": self._server._name,
                    "prevLogIndex": previousIndex,
                    "prevLogTerm": previous["term"],
                    "entries": [current],
                    "leaderCommit": self._server._commitIndex,
                })
            self._send_response_message(appendEntry)
        else:
            # The last append was good, so advance the follower's index,
            # clamping once it has caught up with our log.
            self._nextIndexes[message.sender] += 1

            if self._nextIndexes[message.sender] > self._server._lastLogIndex:
                self._nextIndexes[message.sender] = self._server._lastLogIndex

        return self, None

    def _send_heart_beat(self):
        """Broadcast an empty AppendEntries (heartbeat) to all followers."""
        message = AppendEntriesMessage(
            self._server._name,
            None,
            self._server._currentTerm,
            {
                "leaderId": self._server._name,
                "prevLogIndex": self._server._lastLogIndex,
                "prevLogTerm": self._server._lastLogTerm,
                "entries": [],
                "leaderCommit": self._server._commitIndex,
            })
        self._server.send_message(message)
| {
"content_hash": "a476118e14c9016c3a37f9d0164f05b0",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 36.38805970149254,
"alnum_prop": 0.5557834290401968,
"repo_name": "streed/simpleRaft",
"id": "76395d4a2afdfc9ad5284307c942b41aa8a9e1fd",
"size": "2438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simpleRaft/states/leader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26921"
}
],
"symlink_target": ""
} |
'''
This is a sample code to demonstrate how to use the TensorFlow custom op with
FasterTransformer library in encoder.
This sample code builds a BERT transformer model by TensorFlow and TensorFlow
custom op. Then compare the maximum difference of them to verify the correctness
of FasterTransformer.
Users are also able to use this sample code to test the average forward time of
TensorFlow and FasterTransformer.
'''
import argparse
import copy
import numpy as np
import tensorflow as tf
import threading
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.encoder.utils.encoder import build_sequence_mask
from examples.tensorflow.encoder.utils.encoder import ft_encoder_opennmt
from examples.tensorflow.encoder.utils.encoder import tf_encoder_opennmt
from examples.tensorflow.common_utils.common import cross_check
from examples.tensorflow.common_utils.common import time_test
from examples.tensorflow.common_utils.common import TransformerArgument
def encoder_example(args_dict):
    """Build a BERT-style encoder both in pure TensorFlow and via the
    FasterTransformer (FT) custom op, cross-check their outputs, and
    optionally benchmark them.

    Args:
        args_dict: dict of command-line options (see the argparse setup in
            ``__main__``): batch_size, num_layer, max_seq_len, avg_seq_len,
            head_number, size_per_head, inter_size, data_type,
            allow_gemm_test, test_time, thread_num.

    Returns:
        Maximum absolute element-wise difference between the TF result and
        the FT op result (see NOTE(review) near ``max_diff`` below: the
        EFF variant is currently excluded from the returned value).
    """
    print("\n=============== Argument ===============")
    for key in args_dict:
        print("{}: {}".format(key, args_dict[key]))
    print("========================================")
    # Fix both RNGs so TF and FT see identical weights and inputs.
    np.random.seed(1)
    tf.set_random_seed(1)
    batch_size = args_dict['batch_size']
    num_layer = args_dict['num_layer']
    max_seq_len = args_dict['max_seq_len']
    avg_seq_len = args_dict['avg_seq_len']
    head_num = args_dict['head_number']
    size_per_head = args_dict['size_per_head']
    inter_size = args_dict['inter_size']
    if inter_size == 0:
        # Conventional FFN width: 4x the hidden dimension.
        inter_size = head_num * size_per_head * 4
    tf_datatype = tf.float32
    np_datatype = np.float32
    atol_threshold = 3e-5
    allow_gemm_test = True if args_dict['allow_gemm_test'].lower() == "true" else False
    if args_dict['data_type'] == "fp16":
        # fp16 needs a much looser tolerance for the cross check.
        tf_datatype = tf.float16
        np_datatype = np.float16
        atol_threshold = 3e-2
    hidden_dim = head_num * size_per_head
    # NOTE(review): this random draw is unconditionally overwritten by both
    # branches below -- dead store.
    sequence_length = np.random.randint(1, max_seq_len + 1, size=batch_size)
    if avg_seq_len != -1:
        # This means we use "remove_padding" and set other average sequence length
        sequence_length = np.ones(batch_size) * avg_seq_len
    else:
        sequence_length = np.ones(batch_size) * (max_seq_len / 2)
    sequence_length = sequence_length.astype(np.int32)
    from_data = np.random.randn(batch_size, max_seq_len, hidden_dim)
    from_tensor = tf.convert_to_tensor(from_data, dtype=tf_datatype)
    # NOTE(review): attention_mask is built but never referenced afterwards
    # in this function -- presumably leftover; confirm before removing.
    attention_mask = build_sequence_mask(sequence_length, num_heads=head_num,
                                         maximum_length=max_seq_len, dtype=tf_datatype)
    encoder_args = TransformerArgument(beam_width=1,
                                       head_num=head_num,
                                       size_per_head=size_per_head,
                                       inter_size=inter_size,
                                       num_layer=num_layer,
                                       dtype=tf_datatype,
                                       remove_padding=False,
                                       allow_gemm_test=allow_gemm_test)
    # "EFF" variant: identical model but with padding removed
    # (effective-transformer mode).
    eff_encoder_args = copy.deepcopy(encoder_args)
    eff_encoder_args.remove_padding = True
    with tf.variable_scope("transformer/encoder", reuse=tf.AUTO_REUSE):
        tf_encoder_result = tf_encoder_opennmt(input_tensor=from_tensor,
                                               encoder_args=encoder_args,
                                               sequence_length=sequence_length)
        # The FT ops consume the variables created by the TF encoder above,
        # keyed by variable name, so both paths share identical weights.
        encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        encoder_variables_dict = {}
        for v in encoder_vars:
            encoder_variables_dict[v.name] = v
        op_encoder_result = ft_encoder_opennmt(inputs=from_tensor,
                                               encoder_args=encoder_args,
                                               encoder_vars_dict=encoder_variables_dict,
                                               sequence_length=sequence_length)
        eff_encoder_result = ft_encoder_opennmt(inputs=from_tensor,
                                                encoder_args=eff_encoder_args,
                                                encoder_vars_dict=encoder_variables_dict,
                                                sequence_length=sequence_length)
        '''
        Because FasterTransformer skip some computation for the padding parts,
        if we do not mask these parts, the cross check result would be wrong.
        '''
        # Prevent nan since we will skip to write the data to some position, and these positions may be dirty.
        eff_encoder_result = tf.where(tf.is_nan(eff_encoder_result), tf.zeros_like(eff_encoder_result), eff_encoder_result)
        # Zero out the padded positions of all three results so the
        # element-wise comparison only covers real tokens.
        tf_encoder_result = tf_encoder_result * \
            tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
        op_encoder_result = op_encoder_result * \
            tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
        eff_encoder_result = eff_encoder_result * \
            tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        # Dump the shared variable table for debugging.
        for idx, name in enumerate(encoder_variables_dict):
            print((str(idx) + " " + str(name) + " " +
                   str(encoder_variables_dict[name].shape)) + " " + str(encoder_variables_dict[name].dtype))
        print("#################################")
        tf_encoder_result_val = sess.run(tf_encoder_result)
        op_encoder_result_val = sess.run(op_encoder_result)
        eff_encoder_result_val = sess.run(eff_encoder_result)
        cross_check("Encoder TF v.s. FT with tensor input",
                    tf_encoder_result_val, op_encoder_result_val, atol_threshold)
        cross_check("Encoder TF v.s. EFF-FT with tensor input",
                    tf_encoder_result_val, eff_encoder_result_val, atol_threshold)
        op_diff = abs(tf_encoder_result_val.reshape([-1]) - op_encoder_result_val.reshape([-1]))
        eff_diff = abs(tf_encoder_result_val.reshape([-1]) - eff_encoder_result_val.reshape([-1]))
        max_diff = max(op_diff.max(), eff_diff.max())
        # NOTE(review): this overwrites the line above, so the returned
        # value reflects only the FT (non-EFF) comparison -- confirm
        # whether excluding eff_diff is intentional.
        max_diff = op_diff.max()
        # Timing: wrap each encoder in a while_loop that re-runs it `ite`
        # times, amortizing session / memcpy overhead out of the average.
        ite = 50
        def _cond(from_tensor):
            return tf.constant(True)
        def _ft_body(from_tensor):
            op_encoder_result = ft_encoder_opennmt(inputs=from_tensor,
                                                   encoder_args=encoder_args,
                                                   encoder_vars_dict=encoder_variables_dict,
                                                   sequence_length=sequence_length)
            return op_encoder_result
        def _eff_body(from_tensor):
            eff_encoder_result = ft_encoder_opennmt(inputs=from_tensor,
                                                    encoder_args=eff_encoder_args,
                                                    encoder_vars_dict=encoder_variables_dict,
                                                    sequence_length=sequence_length)
            return eff_encoder_result
        def _tf_body(from_tensor):
            tf_encoder_result = tf_encoder_opennmt(input_tensor=from_tensor,
                                                   encoder_args=encoder_args,
                                                   sequence_length=sequence_length)
            return tf_encoder_result
        tf_while_tensor = tf.while_loop(_cond,
                                        _tf_body,
                                        loop_vars=[from_tensor],
                                        back_prop=False,
                                        maximum_iterations=ite)
        ft_while_tensor = tf.while_loop(_cond,
                                        _ft_body,
                                        loop_vars=[from_tensor],
                                        back_prop=False,
                                        maximum_iterations=ite)
        eff_while_tensor = tf.while_loop(_cond,
                                         _eff_body,
                                         loop_vars=[from_tensor],
                                         back_prop=False,
                                         maximum_iterations=ite)
        if args_dict['test_time'] == 1:
            # Using while loop to run 'ite' times to ignore the overheads of memory copy and model preprocess.
            # We use these times as the profiling results.
            tf_while_time = time_test(sess, tf_while_tensor, 1) / ite # while_loop has run ite times
            # time.sleep(60)
            ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
            # time.sleep(60)
            eff_while_time = time_test(sess, eff_while_tensor, 1) / ite # while_loop has run ite times
            # time.sleep(60)
            ft_type = args_dict['data_type'].upper()
            print("[INFO] batch_size {} max_seq_len {} precision {} {} layer TF-while-time {:6.2f} ms ( {} iterations)".format(
                batch_size, max_seq_len, args_dict['data_type'].upper(), num_layer, tf_while_time, ite))
            print("[INFO] batch_size {} max_seq_len {} precision {} {} layer FT-OP-while-time {:6.2f} ms ( {} iterations)".format(
                batch_size, max_seq_len, ft_type, num_layer, ft_while_time, ite))
            print("[INFO] batch_size {} max_seq_len {} precision {} {} layer EFF-OP-while-time {:6.2f} ms ( {} iterations)".format(
                batch_size, max_seq_len, ft_type, num_layer, eff_while_time, ite))
        if args_dict['thread_num'] > 1:
            # Multi-threading demonstration
            thread_list = []
            thread_num = args_dict['thread_num']
            def run():
                ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
                print("[INFO] batch_size {} max_seq_len {} {} layer FT-OP-while-time {:6.2f} ms with {} threads".format(batch_size,
                      max_seq_len, num_layer, ft_while_time, thread_num))
            for i in range(thread_num):
                thread_list.append(threading.Thread(target=run, name="RunFT"))
            for t in thread_list:
                t.start()
            for t in thread_list:
                t.join()
    return max_diff
if __name__ == "__main__":
    # Command-line front end: collect model hyper-parameters and benchmark
    # flags, then hand them to encoder_example() as a plain dict.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-batch', '--batch_size', type=int, default=4, metavar='NUMBER',
                            help='batch size (default: 4)')
    arg_parser.add_argument('-l', '--num_layer', type=int, default=12, metavar='NUMBER',
                            help='number of layers (default: 12)')
    arg_parser.add_argument('-s', '--max_seq_len', type=int, default=32, metavar='NUMBER',
                            help='max sequence length (default: 32)')
    arg_parser.add_argument('-n', '--head_number', type=int, default=12, metavar='NUMBER',
                            help='head number (default: 12)')
    arg_parser.add_argument('-size', '--size_per_head', type=int, default=64, metavar='NUMBER',
                            help='size per head (default: 64)')
    arg_parser.add_argument('-inter_size', '--inter_size', type=int, default=0, metavar='NUMBER',
                            help='inter_size (default: 0)')
    arg_parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
                            help='data type (default: fp32)', choices=['fp32', 'fp16'])
    arg_parser.add_argument('-allow_gemm_test', '--allow_gemm_test', type=str, default="False", metavar='BOOL',
                            help='whether allow gemm test inside FT (default: False)', choices=["True", "False"])
    arg_parser.add_argument('-time', '--test_time', type=int, default=0, metavar='BOOL',
                            help='test the time or not. (default: False (0)), True is 1.',
                            choices=[0, 1])
    arg_parser.add_argument('-avg_seq', '--avg_seq_len', type=int, default=-1, metavar='NUMBER',
                            help='average sequence length (default: -1)')
    arg_parser.add_argument('-thread_num', '--thread_num', type=int, default=1, metavar='int',
                            help='Testing multithread if thread_num > 1.')
    cli_args = arg_parser.parse_args()
    encoder_example(vars(cli_args))
| {
"content_hash": "1adf25aac76b91e7a5d44036711fb50e",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 171,
"avg_line_length": 49.937254901960785,
"alnum_prop": 0.5528506360923512,
"repo_name": "NVIDIA/FasterTransformer",
"id": "b894ac9155595b85ceb05f219cdea1acd99050ce",
"size": "13350",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/tensorflow/encoder/encoder_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2444"
},
{
"name": "C++",
"bytes": "3361167"
},
{
"name": "CMake",
"bytes": "117845"
},
{
"name": "Cuda",
"bytes": "1734491"
},
{
"name": "HCL",
"bytes": "1482"
},
{
"name": "Python",
"bytes": "73804"
},
{
"name": "Shell",
"bytes": "46724"
}
],
"symlink_target": ""
} |
__all__ = [ "SharedLock" ]
################################################################################
from threading import Lock, currentThread, Event
from random import randint
from exc_string import trace_string
# Backfill the ``sorted`` builtin for pre-2.4 interpreters.
# Bug fix: the original probed ``hasattr(__builtins__, "sorted")``, but
# ``__builtins__`` is a plain dict (not a module) whenever this file is
# imported rather than run as a script, so the test was always False and
# this slower, key-less shim shadowed the real builtin even where it
# exists.  Probing the name directly works in both cases.
try:
    sorted
except NameError:
    def sorted(seq):
        """Return a new list containing the items of *seq* in ascending order."""
        result = [ x for x in seq ]
        result.sort()
        return result
################################################################################
class SharedLock(object):
    """A re-entrant shared/exclusive (readers-writer) lock with timeouts.

    A thread may hold the lock in shared mode (many concurrent holders),
    exclusive mode (single holder), or both at once when it is the only
    holder.  A shared holder may upgrade to exclusive: acquire() releases
    its shared locks while it waits and _acquire_event()/_release_threads()
    hand them back afterwards.  Pending exclusive acquirers block new
    shared acquirers, and _release_threads() flips a coin between waiting
    owners and users to prevent starvation of either side.  Wakeup Events
    are pooled and recycled (_pick_event/_unpick_event).

    NOTE: Python 2 code (dict.iteritems, list-returning keys()/filter/map);
    names use Hungarian-style prefixes (thr/lst/dic/int/bool/evt).
    """
    def __init__(self, log = None, debug = False):
        """
        Takes two optional parameters, (1) log is an external log function the
        lock would use to send its messages to, ex: lambda s: xprint(s),
        (2) debug is a boolean value, if it's True the lock would be checking
        its internal invariant before and after each call.
        """
        self.__log, self.__debug, self.lckLock = log, debug, Lock()
        # thrOwner/intOwnerDepth: exclusive holder and its re-entry depth.
        self.thrOwner, self.intOwnerDepth, self.dicUsers = None, 0, {}
        # lstOwners/lstUsers hold pending (thread, event, shared-depth)
        # tuples; lstPooledEvents is the recycled Event pool.
        self.lstOwners, self.lstUsers, self.lstPooledEvents = [], [], []
    ################################### utility log function
    def _log(self, s):
        thrCurrent = currentThread()
        self.__log("%s @ %.08x %s %s @ %.08x in %s"
                   % (thrCurrent.getName(), id(thrCurrent), s,
                      self._debug_dump(), id(self), trace_string()))
    ################################### debugging lock state dump
    def _debug_dump(self):
        # Caller must already hold self.lckLock.
        return "SharedLock(Ex:[%s] (%s), Sh:[%s] (%s))" \
               % (self.thrOwner is not None
                  and "%s:%d" % (self.thrOwner.getName(),
                                 self.intOwnerDepth)
                  or "",
                  ", ".join([ "%s:%d" % (th.getName(), dp)
                              for th, evt, dp in self.lstOwners ]),
                  ", ".join(sorted([ "%s:%d" % (th.getName(), dp)
                                     for th, dp in self.dicUsers.iteritems() ])),
                  ", ".join([ "%s:%d" % (th.getName(), dp)
                              for th, evt, dp in self.lstUsers ]))
    def debug_dump(self):
        """
        Returns a printable string describing the current lock state.
        """
        self._lock()
        try:
            return self._debug_dump()
        finally:
            self._unlock()
    ################################### utility predicates
    def _has_owner(self):
        return self.thrOwner is not None
    def _has_pending_owners(self):
        return len(self.lstOwners) > 0
    def _has_users(self):
        return len(self.dicUsers) > 0
    def _has_pending_users(self):
        return len(self.lstUsers) > 0
    ################################### lock invariant
    def _invariant(self): # invariant checks slow down the lock a lot (~3 times)
        # a single thread can hold both shared and exclusive lock
        # as soon as it's the only thread holding either
        if self._has_owner() and self._has_users() \
           and self.dicUsers.keys() != [self.thrOwner]:
            return False
        # if noone is holding the lock, noone should be pending on it
        if not self._has_owner() and not self._has_users():
            return not self._has_pending_owners() \
                   and not self._has_pending_users()
        # noone can be holding a lock zero times and vice versa
        if (self._has_owner() and self.intOwnerDepth <= 0) \
           or (not self._has_owner() and self.intOwnerDepth > 0):
            return False
        if len(filter(lambda dp: dp <= 0, self.dicUsers.values())) > 0:
            return False
        # if there is no owner nor pending owners, there should be no
        # pending users (all users must be executing)
        if not self._has_owner() and not self._has_pending_owners() \
           and self._has_pending_users():
            return False
        # if there is no owner nor running users, there should be no
        # pending owners (an owner must be executing)
        if not self._has_owner() and not self._has_users() \
           and self._has_pending_owners():
            return False
        # a thread may be pending on a lock only once, either as user or as owner
        lstPendingThreads = sorted(map(lambda t: t[0], self.lstUsers) +
                                   map(lambda t: t[0], self.lstOwners))
        for i in range(len(lstPendingThreads) - 1):
            if lstPendingThreads[i] is lstPendingThreads[i+1]:
                return False
        return True
    ################################### instance lock
    def _lock(self):
        self.lckLock.acquire()
    def _unlock(self):
        self.lckLock.release()
    ################################### sleep/wakeup event pool
    def _pick_event(self): # events are pooled/recycled
        if len(self.lstPooledEvents): # because creating and then
            return self.lstPooledEvents.pop(0) # garbage collecting kernel
        else: # objects on each call could
            return Event() # be prohibitively expensive
    def _unpick_event(self, _evtEvent):
        # Return a no-longer-needed event to the pool.
        self.lstPooledEvents.append(_evtEvent)
    ################################### sleep/wakeup utility
    def _acquire_event(self, _evtEvent, timeout): # puts the thread to sleep until the
                                                  # lock is acquired or timeout elapses
        if timeout is None:
            _evtEvent.wait()
            result = True
        else:
            _evtEvent.wait(timeout)
            result = _evtEvent.isSet()
        thrCurrent = currentThread()
        self._lock()
        try:
            # even if result indicates failure, the thread might still be having
            # the lock (race condition between the isSet() and _lock() above)
            if not result:
                result = _evtEvent.isSet()
            # if the lock has not been acquired, the thread must be removed from
            # the pending list it's on. in case the thread was waiting for the
            # exclusive lock and it previously had shared locks, it's put to sleep
            # again this time infinitely (!), waiting for its shared locks back
            boolReAcquireShared = False
            if not result: # the thread has failed to acquire the lock
                for i, (thrUser, evtEvent, intSharedDepth) in enumerate(self.lstUsers):
                    if thrUser is thrCurrent and evtEvent is _evtEvent:
                        assert intSharedDepth == 1
                        del self.lstUsers[i]
                        break
                else:
                    for i, (thrOwner, evtEvent, intSharedDepth) in enumerate(self.lstOwners):
                        if thrOwner is thrCurrent and evtEvent is _evtEvent:
                            del self.lstOwners[i]
                            if intSharedDepth > 0:
                                # The upgrader previously held shared locks:
                                # give them back now if possible, otherwise
                                # queue it as a pending user.
                                if not self._has_owner():
                                    self.dicUsers[thrCurrent] = intSharedDepth
                                else:
                                    self.lstUsers.append((thrCurrent, _evtEvent, intSharedDepth))
                                    boolReAcquireShared = True
                            break
                    else:
                        assert False, "Invalid thread for %s in %s" % \
                                      (self._debug_dump(), trace_string())
            # if a thread has failed to acquire a lock, it's identical as if it had
            # it and then released, therefore other threads should be released now
            self._release_threads()
            if not boolReAcquireShared:
                _evtEvent.clear()
                self._unpick_event(_evtEvent)
            if self.__debug:
                assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                          (self._debug_dump(), trace_string())
            if result:
                if self.__log: self._log("acquired")
            else:
                if self.__log: self._log("timed out in %.02f second(s) waiting for" % timeout)
            if boolReAcquireShared:
                if self.__log: self._log("acquiring %d previously owned shared lock(s) for" % intSharedDepth)
        finally:
            self._unlock()
        if boolReAcquireShared:
            # Wait (infinitely) for the shared locks to be handed back.
            assert self._acquire_event(_evtEvent, None)
            return False
        return result
    def _release_events(self, _lstEvents): # releases waiting thread(s)
        for evtEvent in _lstEvents:
            evtEvent.set()
    ################################### exclusive acquire
    def acquire(self, timeout = None):
        """
        Attempts to acquire the lock exclusively within the optional timeout.
        If the timeout is not specified, waits for the lock infinitely.
        Returns True if the lock has been acquired, False otherwise.
        """
        thrCurrent = currentThread()
        self._lock()
        try:
            if self.__log: self._log("acquiring exclusive")
            if self.__debug:
                assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                          (self._debug_dump(), trace_string())
            # this thread already has exclusive lock, the count is incremented
            if thrCurrent is self.thrOwner:
                self.intOwnerDepth += 1
                if self.__debug:
                    assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                              (self._debug_dump(), trace_string())
                if self.__log: self._log("acquired exclusive")
                return True
            # this thread already has shared lock, this is the most complicated case
            elif thrCurrent in self.dicUsers:
                # the thread gets exclusive lock immediately if there is no other threads
                if self.dicUsers.keys() == [thrCurrent] \
                   and not self._has_pending_users() and not self._has_pending_owners():
                    self.thrOwner = thrCurrent
                    self.intOwnerDepth = 1
                    if self.__debug:
                        assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                                  (self._debug_dump(), trace_string())
                    if self.__log: self._log("acquired exclusive")
                    return True
                # the thread releases its shared lock in hope for the future
                # exclusive one
                intSharedDepth = self.dicUsers.pop(thrCurrent) # that many times it had shared lock
                evtEvent = self._pick_event()
                self.lstOwners.append((thrCurrent, evtEvent, intSharedDepth)) # it will be given them back
                self._release_threads()
            # a thread acquires exclusive lock whenever there is no
            # current owner nor running users
            elif not self._has_owner() and not self._has_users():
                self.thrOwner = thrCurrent
                self.intOwnerDepth = 1
                if self.__debug:
                    assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                              (self._debug_dump(), trace_string())
                if self.__log: self._log("acquired exclusive")
                return True
            # otherwise the thread registers itself as a pending owner with no
            # prior record of holding shared lock
            else:
                evtEvent = self._pick_event()
                self.lstOwners.append((thrCurrent, evtEvent, 0))
                if self.__debug:
                    assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                              (self._debug_dump(), trace_string())
                if self.__log: self._log("waiting for exclusive")
        finally:
            self._unlock()
        return self._acquire_event(evtEvent, timeout) # the thread waits for a lock release
    ################################### shared acquire
    def acquire_shared(self, timeout = None):
        """
        Attempts to acquire the lock in shared mode within the optional
        timeout. If the timeout is not specified, waits for the lock
        infinitely. Returns True if the lock has been acquired, False
        otherwise.
        """
        thrCurrent = currentThread()
        self._lock()
        try:
            if self.__log: self._log("acquiring shared")
            if self.__debug:
                assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                          (self._debug_dump(), trace_string())
            # this thread already has shared lock, the count is incremented
            if thrCurrent in self.dicUsers:
                self.dicUsers[thrCurrent] += 1
                if self.__debug:
                    assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                              (self._debug_dump(), trace_string())
                if self.__log: self._log("acquired shared")
                return True
            # this thread already has exclusive lock, now it also has shared
            elif thrCurrent is self.thrOwner:
                if thrCurrent in self.dicUsers:
                    self.dicUsers[thrCurrent] += 1
                else:
                    self.dicUsers[thrCurrent] = 1
                if self.__debug:
                    assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                              (self._debug_dump(), trace_string())
                if self.__log: self._log("acquired shared")
                return True
            # a thread acquires shared lock whenever there is no owner
            # nor pending owners (to prevent owners starvation)
            elif not self._has_owner() and not self._has_pending_owners():
                self.dicUsers[thrCurrent] = 1
                if self.__debug:
                    assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                              (self._debug_dump(), trace_string())
                if self.__log: self._log("acquired shared")
                return True
            # otherwise the thread registers itself as a pending user
            else:
                evtEvent = self._pick_event()
                self.lstUsers.append((thrCurrent, evtEvent, 1))
                if self.__debug:
                    assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                              (self._debug_dump(), trace_string())
                if self.__log: self._log("waiting for shared")
        finally:
            self._unlock()
        return self._acquire_event(evtEvent, timeout) # the thread waits for a lock release
    ###################################
    def _release_threads(self):
        # Caller must hold self.lckLock.
        # a decision is made which thread(s) to awake upon a release
        if self._has_owner():
            boolWakeUpOwner = False # noone to awake, the exclusive owner
            boolWakeUpUsers = False # must've released its shared lock
        elif not self._has_pending_owners():
            boolWakeUpOwner = False
            boolWakeUpUsers = self._has_pending_users()
        elif not self._has_users():
            boolWakeUpOwner = not self._has_pending_users() \
                              or randint(0, 1) == 0 # this prevents starvation
            boolWakeUpUsers = self._has_pending_users() and not boolWakeUpOwner
        else:
            boolWakeUpOwner = False # noone to awake, running users prevent
            boolWakeUpUsers = False # pending owners from running
        # the winning thread(s) are released
        lstEvents = []
        if boolWakeUpOwner:
            self.thrOwner, evtEvent, intSharedDepth = self.lstOwners.pop(0)
            self.intOwnerDepth = 1
            if intSharedDepth > 0:
                self.dicUsers[self.thrOwner] = intSharedDepth # restore thread's shared locks
            lstEvents.append(evtEvent)
        elif boolWakeUpUsers:
            # all pending users are admitted at once
            for thrUser, evtEvent, intSharedDepth in self.lstUsers:
                self.dicUsers[thrUser] = intSharedDepth
                lstEvents.append(evtEvent)
            del self.lstUsers[:]
        self._release_events(lstEvents)
    ################################### exclusive release
    def release(self):
        """
        Releases the lock previously locked by a call to acquire().
        Returns None.
        """
        thrCurrent = currentThread()
        self._lock()
        try:
            if self.__log: self._log("releasing exclusive")
            if self.__debug:
                assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                          (self._debug_dump(), trace_string())
            if thrCurrent is not self.thrOwner:
                raise Exception("Current thread has not acquired the lock")
            # the thread releases its exclusive lock
            self.intOwnerDepth -= 1
            if self.intOwnerDepth > 0:
                if self.__debug:
                    assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                              (self._debug_dump(), trace_string())
                if self.__log: self._log("released exclusive")
                return
            self.thrOwner = None
            # a decision is made which pending thread(s) to awake (if any)
            self._release_threads()
            if self.__debug:
                assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                          (self._debug_dump(), trace_string())
            if self.__log: self._log("released exclusive")
        finally:
            self._unlock()
    ################################### shared release
    def release_shared(self):
        """
        Releases the lock previously locked by a call to acquire_shared().
        Returns None.
        """
        thrCurrent = currentThread()
        self._lock()
        try:
            if self.__log: self._log("releasing shared")
            if self.__debug:
                assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                          (self._debug_dump(), trace_string())
            if thrCurrent not in self.dicUsers:
                raise Exception("Current thread has not acquired the lock")
            # the thread releases its shared lock
            self.dicUsers[thrCurrent] -= 1
            if self.dicUsers[thrCurrent] > 0:
                if self.__debug:
                    assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                              (self._debug_dump(), trace_string())
                if self.__log: self._log("released shared")
                return
            else:
                del self.dicUsers[thrCurrent]
            # a decision is made which pending thread(s) to awake (if any)
            self._release_threads()
            if self.__debug:
                assert self._invariant(), "SharedLock invariant failed: %s in %s" % \
                                          (self._debug_dump(), trace_string())
            if self.__log: self._log("released shared")
        finally:
            self._unlock()
################################################################################
# EOF
| {
"content_hash": "3f15467f100c6388a514da57a0aa9916",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 113,
"avg_line_length": 38.03059273422562,
"alnum_prop": 0.5040723981900452,
"repo_name": "ActiveState/code",
"id": "4fcb865b501f957013c54c50fae7c1da36ab42a9",
"size": "23648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/465156_Shared_lock_akreaderwriter_lock_timeouts_FIFO/recipe-465156.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
import datetime
import os
import re
import tempfile
import urllib2
from lxml import etree
from optparse import make_option
from tempfile import gettempdir
from django.core.files import File
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import utc
from airmozilla.main.models import Category, Event, Tag, Template
# Default embed-code templates seeded into the database by this migration.
# ``{{ tag }}`` / ``{{ url }}`` are placeholders filled in per event.
# Bug fix: the original <video> tag declared the ``controls`` attribute
# twice, which is invalid HTML; the duplicate has been removed.
DEFAULT_VIDLY_TEMPLATE = """
<video controls width="100%" preload="none" poster="https://d3fenhwk93s16g.cloudfront.net/{{ tag }}/poster.jpg">
<source src="http://cf.cdn.vid.ly/{{ tag }}/mp4.mp4" type="video/mp4">
<source src="http://cf.cdn.vid.ly/{{ tag }}/webm.webm" type="video/webm">
<source src="http://cf.cdn.vid.ly/{{ tag }}/ogv.ogv" type="video/ogg">
<a target="_blank" href="http://vid.ly/{{ tag }}"><img src="https://d3fenhwk93s16g.cloudfront.net/{{ tag }}/poster.jpg" width="500" alt="Video"></a>
</video>
"""
DEFAULT_VIDLY_NAME = "Vid.ly"
DEFAULT_OGG_TEMPLATE = """
<video width="620" height="350" controls="controls">
<source src="{{ url }}" type="video/ogg" />
</video>
"""
DEFAULT_OGG_NAME = "Ogg Video"
class Command(BaseCommand):
    """Import a Wordpress XML dump as airmozilla Events.

    Usage: wp_import <wordpress_xml_dump.xml> <default_thumb>

    Posts become Events; 'attachment' items are collected first and used
    as event thumbnails, falling back to the provided default image.
    """
    args = '<wordpress_xml_dump.xml> <default_thumb>'
    option_list = BaseCommand.option_list + (
        make_option('--clear',
                    action='store_true',
                    dest='clear',
                    default=False,
                    help='Clear all events before running the migration.'),
    )
    # XML namespace prefixes used by the Wordpress 1.2 export format.
    nsmap = {
        'wp': 'http://wordpress.org/export/1.2/',
        'dc': 'http://purl.org/dc/elements/1.1/',
        'content': 'http://purl.org/rss/1.0/modules/content/',
        'excerpt': 'http://wordpress.org/export/1.2/excerpt/'
    }
    # Directory where downloaded thumbnails are cached between runs.
    import_cache = tempfile.gettempdir()
    def _check_video_templates(self):
        # make sure we have some assumed Video templates in the database
        try:
            Template.objects.get(name=DEFAULT_VIDLY_NAME)
        except Template.DoesNotExist:
            Template.objects.create(
                name=DEFAULT_VIDLY_NAME,
                content=DEFAULT_VIDLY_TEMPLATE
            )
        try:
            Template.objects.get(name=DEFAULT_OGG_NAME)
        except Template.DoesNotExist:
            Template.objects.create(
                name=DEFAULT_OGG_NAME,
                content=DEFAULT_OGG_TEMPLATE
            )
    def handle(self, *args, **options):
        """Parse the dump item-by-item, creating an Event per post."""
        if options['clear']:
            for e in Event.objects.all():
                e.delete()
        self._check_video_templates()
        # post_id -> attachment URL; filled as attachment items are seen.
        attachments = {}
        try:
            wordpress_xml_dump = args[0]
            # iterparse streams the (potentially large) dump item by item.
            item_parser = etree.iterparse(wordpress_xml_dump, tag='item')
        except IndexError:
            raise CommandError('Please provide an XML dump.')
        except IOError:
            raise CommandError('The provided file does not exist or is not'
                               ' a valid Wordpress XML dump')
        try:
            self.default_thumb_path = args[1]
            self.default_thumb = open(self.default_thumb_path, 'rb')
        except IOError:
            raise CommandError('Please provide a valid default thumbnail.')
        for _, element in item_parser:
            # destination_key -> source child tag within the <item> element.
            fields = {
                'title': 'title',
                'status': 'wp:status',
                'start_time': 'pubDate',
                'description': 'content:encoded',
                'short_description': 'excerpt:encoded',
                'created': 'wp:post_date',
                'slug': 'wp:post_name',
                'type': 'wp:post_type',
                'attachment': 'wp:attachment_url',
                'post_id': 'wp:post_id'
            }
            item = self.extract_item(element, fields)
            if Event.objects.filter(slug=item['slug']).exists():
                self.stdout.write(
                    'Event %s already exists, skipping.\n' % item['slug']
                )
                continue
            if item['type'] == 'attachment':
                # The item is a thumbnail attachment; save for later
                attachments[item['post_id']] = item['attachment']
            elif item['type'] == 'post':
                # Create and initiate a new event
                event = Event()
                event.title = item['title']
                event.slug = item['slug']
                try:
                    event.start_time = datetime.datetime.strptime(
                        item['start_time'],
                        '%a, %d %b %Y %H:%M:%S +0000'
                    ).replace(tzinfo=utc)
                except ValueError:
                    # pubDate did not parse; fall back to wp:post_date.
                    event.start_time = datetime.datetime.strptime(
                        item['created'],
                        '%Y-%m-%d %H:%M:%S'
                    ).replace(tzinfo=utc)
                event.archive_time = (
                    event.start_time + datetime.timedelta(hours=1)
                )
                # Set status & public status from WP metadata
                event.status = Event.STATUS_INITIATED
                event.public = False
                if item['status'] == 'publish':
                    event.status = Event.STATUS_SCHEDULED
                    event.public = True
                elif item['status'] == 'private':
                    event.status = Event.STATUS_SCHEDULED
                elif item['status'] == 'trash':
                    event.status = Event.STATUS_REMOVED
                # Parse out the video from the event description
                event.description = 'n/a'
                if item['description']:
                    self.parse_description(event, item['description'])
                event.short_description = item['short_description'] or ''
                # Add categories and tags
                event.save()
                for category in element.findall('category'):
                    domain = category.attrib['domain']
                    text = category.text
                    # First 'category'-domain entry wins; the rest are tags.
                    if domain == 'category' and not event.category:
                        cat, _ = Category.objects.get_or_create(name=text)
                        event.category = cat
                    else:
                        tag = text.lower().strip()
                        tag_add, _ = Tag.objects.get_or_create(name=tag)
                        event.tags.add(tag_add)
                # Add thumbnail and save
                thumbnail_id = 0
                for meta in element.findall('wp:postmeta',
                                            namespaces=self.nsmap):
                    meta_key, meta_val = meta.getchildren()
                    if meta_key.text == '_thumbnail_id':
                        thumbnail_id = meta_val.text
                if thumbnail_id in attachments:
                    self.attach_thumbnail(event, attachments[thumbnail_id])
                else:
                    self.attach_thumbnail(event)
                    self.stdout.write(
                        'No thumb found for %s, used default.\n' % event.slug
                    )
                event.save()
                self.stdout.write('Saved event %s\n' % event.slug)
    def extract_item(self, element, fields):
        """Returns a shortcut dictionary of element's children parsed
        according to fields (destination_key: source_child_tag)."""
        item = {}
        for name, tag in fields.iteritems():
            child = element.find(tag, namespaces=self.nsmap)
            try:
                item[name] = child.text.encode('utf-8').strip()
            except AttributeError:
                # Child tag missing or has no text.
                item[name] = None
        return item
    def parse_description(self, event, description_raw):
        """Parse out video embeds from the description, correctly set
        templates and their environments; leave descriptions clean."""
        vidly_tag = re.compile('\[vidly code="(\w+)?"\]')
        vidly_template = Template.objects.get(name='Vid.ly')
        ogg_tag = re.compile('<video src="([^"]*)".*?>')
        ogg_template = Template.objects.get(name='Ogg Video')
        event.description = description_raw
        vidly_search = vidly_tag.search(description_raw)
        ogg_search = ogg_tag.search(description_raw)
        if vidly_search:
            # Strip the shortcode from the text; keep the tag as env.
            event.description = event.description.replace(
                vidly_search.group(0), ''
            )
            event.template = vidly_template
            event.template_environment = {'tag': vidly_search.group(1)}
        elif ogg_search:
            event.description = event.description.replace(
                ogg_search.group(0), ''
            )
            event.template = ogg_template
            event.template_environment = {'url': ogg_search.group(1)}
        else:
            # No playable video embed found; mark the event removed.
            event.status = Event.STATUS_REMOVED
        event.description = event.description.strip()
    def attach_thumbnail(self, event, url=None):
        """Download, cache, and attach an event's placeholder image."""
        if not url:
            # Use a default image, provided
            _, ext = os.path.splitext(self.default_thumb_path)
            img_temp = File(self.default_thumb)
        else:
            _, ext = os.path.splitext(url)
            cache_path = os.path.join(self.import_cache, event.slug) + ext
            try:
                # Read a cached image
                img_temp = File(
                    open(cache_path, 'rb')
                )
            except IOError:
                # Download and create the image
                img_temp = File(
                    open(cache_path, 'wb+')
                )
                img_temp.write(urllib2.urlopen(url).read())
                img_temp.flush()
        event.placeholder_img.save('img%s' % ext, img_temp)
| {
"content_hash": "6eee4b1df86f795e424d5e91f0423b09",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 152,
"avg_line_length": 41.276595744680854,
"alnum_prop": 0.5261855670103093,
"repo_name": "peterbe/airmozilla",
"id": "e425491abf0a0084e37b60ca7ab86a09de60e97b",
"size": "9700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airmozilla/manage/management/commands/wp_import.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "70585"
},
{
"name": "JavaScript",
"bytes": "10192"
},
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "1235514"
},
{
"name": "Shell",
"bytes": "3672"
}
],
"symlink_target": ""
} |
"""Common brick related utilities."""
from __future__ import print_function
import os
from chromite.lib import osutils
from chromite.lib import workspace_lib
# layout.conf keys written for every brick overlay unless overridden.
_DEFAULT_LAYOUT_CONF = {'profile_eapi_when_unspecified': '5-progress',
                        'profile-formats': 'portage-2 profile-default-eapi',
                        'thin-manifests': 'true',
                        'use-manifests': 'true'}
# Name of the brick configuration file, relative to the brick directory.
_CONFIG_FILE = 'config.json'
# Overlays handled separately by the build system; excluded from legacy deps.
_IGNORED_OVERLAYS = ('portage-stable', 'chromiumos', 'eclass-overlay')
# Raised by Brick.__init__ when a new brick cannot be created.
class BrickCreationFailed(Exception):
  """The brick creation failed."""
# Raised when a location does not contain a (non-legacy) brick.
class BrickNotFound(Exception):
  """The brick does not exist."""
# Raised when an operation is attempted on a brick that cannot support it
# (e.g. updating the configuration of a legacy board overlay).
class BrickFeatureNotSupported(Exception):
  """Attempted feature not supported for this brick."""
class Brick(object):
  """Encapsulates the interaction with a brick."""
  def __init__(self, brick_loc, initial_config=None, allow_legacy=True):
    """Instantiates a brick object.

    Args:
      brick_loc: brick locator. This can be a relative path to CWD, an absolute
        path, a public board name prefix with 'board:' or a relative path to the
        root of the workspace, prefixed with '//').
      initial_config: The initial configuration as a python dictionary.
        If not None, creates a brick with this configuration.
      allow_legacy: Allow board overlays, simulating a basic read-only config.
        Ignored if |initial_config| is not None.

    Raises:
      ValueError: If |brick_loc| is invalid.
      LocatorNotResolved: |brick_loc| is valid but could not be resolved.
      BrickNotFound: If |brick_loc| does not point to a brick and no initial
        config was provided.
      BrickCreationFailed: when the brick could not be created successfully.
    """
    if workspace_lib.IsLocator(brick_loc):
      self.brick_dir = workspace_lib.LocatorToPath(brick_loc)
      self.brick_locator = brick_loc
    else:
      self.brick_dir = brick_loc
      self.brick_locator = workspace_lib.PathToLocator(brick_loc)
    # |config| holds the parsed brick configuration; |legacy| marks board
    # overlays that are simulated as read-only bricks.
    self.config = None
    self.legacy = False
    config_json = os.path.join(self.brick_dir, _CONFIG_FILE)
    if not os.path.exists(config_json):
      if initial_config:
        if os.path.exists(self.brick_dir):
          raise BrickCreationFailed('directory %s already exists.'
                                    % self.brick_dir)
        success = False
        try:
          self.UpdateConfig(initial_config)
          osutils.SafeMakedirs(self.OverlayDir())
          osutils.SafeMakedirs(self.SourceDir())
          success = True
        except BrickNotFound as e:
          # If BrickNotFound was raised, the dependencies contain a missing
          # brick.
          raise BrickCreationFailed('dependency not found %s' % e)
        finally:
          if not success:
            # If the brick creation failed for any reason, cleanup the partially
            # created brick.
            osutils.RmDir(self.brick_dir, ignore_missing=True)
      elif allow_legacy:
        # Simulate a read-only config from the overlay's layout.conf.
        self.legacy = True
        try:
          masters = self._ReadLayoutConf().get('masters')
          masters_list = masters.split() if masters else []
          # Keep general Chromium OS overlays out of this list as they are
          # handled separately by the build system.
          deps = ['board:' + d for d in masters_list
                  if d not in _IGNORED_OVERLAYS]
          self.config = {'name': self._ReadLayoutConf()['repo-name'],
                         'dependencies': deps}
        except (IOError, KeyError):
          pass
      if self.config is None:
        raise BrickNotFound('Brick not found at %s' % self.brick_dir)
    elif initial_config is None:
      self.config = workspace_lib.ReadConfigFile(config_json)
    else:
      raise BrickCreationFailed('brick %s already exists.' % self.brick_dir)
    # Legacy bricks have no friendly name.
    self.friendly_name = None
    if not self.legacy:
      self.friendly_name = workspace_lib.LocatorToFriendlyName(
          self.brick_locator)
  def _LayoutConfPath(self):
    """Returns the path to the layout.conf file."""
    return os.path.join(self.OverlayDir(), 'metadata', 'layout.conf')
  def _WriteLayoutConf(self, content):
    """Writes layout.conf.

    Sets unset fields to a sensible default and write |content| in layout.conf
    in the right format.

    Args:
      content: dictionary containing the set fields in layout.conf.
    """
    for k, v in _DEFAULT_LAYOUT_CONF.iteritems():
      content.setdefault(k, v)
    content_str = ''.join(['%s = %s\n' % (k, v)
                           for k, v in content.iteritems()])
    osutils.WriteFile(self._LayoutConfPath(), content_str, makedirs=True)
  def _ReadLayoutConf(self):
    """Returns the content of layout.conf as a Python dictionary."""
    def ParseConfLine(line):
      k, _, v = line.partition('=')
      return k.strip(), v.strip() or None
    content_str = osutils.ReadFile(self._LayoutConfPath())
    return dict(ParseConfLine(line) for line in content_str.splitlines())
  def UpdateConfig(self, config, regenerate=True):
    """Updates the brick's configuration.

    Writes |config| to the configuration file.
    If |regenerate| is true, regenerate the portage configuration files in
    this brick to match the new configuration.

    Args:
      config: brick configuration as a python dict.
      regenerate: if True, regenerate autogenerated brick files.
    """
    if self.legacy:
      raise BrickFeatureNotSupported(
          'Cannot update configuration of legacy brick %s' % self.brick_dir)
    self.config = config
    # All objects must be unambiguously referenced. Normalize all the
    # dependencies according to the workspace.
    self.config['dependencies'] = [d if workspace_lib.IsLocator(d)
                                   else workspace_lib.PathToLocator(d)
                                   for d in self.config.get('dependencies', [])]
    workspace_lib.WriteConfigFile(os.path.join(self.brick_dir, _CONFIG_FILE),
                                  config)
    if regenerate:
      self.GeneratePortageConfig()
  def GeneratePortageConfig(self):
    """Generates all autogenerated brick files."""
    # We don't generate anything in legacy brick so everything is up-to-date.
    if self.legacy:
      return
    deps = [b.config['name'] for b in self.Dependencies()]
    self._WriteLayoutConf(
        {'masters': ' '.join(
            ['eclass-overlay', 'portage-stable', 'chromiumos'] + deps),
         'repo-name': self.config['name']})
  def Dependencies(self):
    """Returns the dependent bricks."""
    return [Brick(d) for d in self.config.get('dependencies', [])]
  def Inherits(self, brick_name):
    """Checks whether this brick contains |brick_name|.

    Args:
      brick_name: The name of the brick to check containment.

    Returns:
      Whether |brick_name| is contained in this brick.
    """
    return brick_name in [b.config['name'] for b in self.BrickStack()]
  def MainPackages(self):
    """Returns the brick's main package(s).

    This finds the 'main_package' property. It nevertheless returns a (single
    element) list as it is easier to work with.

    Returns:
      A list of main packages; empty if no main package configured.
    """
    main_package = self.config.get('main_package')
    return [main_package] if main_package else []
  def OverlayDir(self):
    """Returns the brick's overlay directory."""
    if self.legacy:
      return self.brick_dir
    return os.path.join(self.brick_dir, 'packages')
  def SourceDir(self):
    """Returns the project's source directory."""
    return os.path.join(self.brick_dir, 'src')
  def FriendlyName(self):
    """Return the friendly name for this brick.

    This name is used as the board name for legacy commands (--board).
    """
    if self.friendly_name is None:
      raise BrickFeatureNotSupported()
    return self.friendly_name
  def BrickStack(self):
    """Returns the brick stack for this brick.

    Returns:
      A list of bricks, respecting the partial ordering of bricks as defined by
      dependencies, ordered from the lowest priority to the highest priority.
    """
    seen = set()
    # Post-order traversal: dependencies come before the brick itself.
    def _stack(brick):
      seen.add(brick.brick_dir)
      l = []
      for dep in brick.Dependencies():
        if dep.brick_dir not in seen:
          l.extend(_stack(dep))
      l.append(brick)
      return l
    return _stack(self)
def FindBrickInPath(path=None):
  """Returns the brick containing |path|, if any.

  Walks upward from |path| and returns a Brick for the first ancestor
  directory that is a brick root. Legacy board overlays are not considered.

  Args:
    path: path to a directory. If |path| is None, |path| will be set to CWD.

  Returns:
    A Brick for the closest enclosing brick directory, or None when no
    parent directory is a brick.
  """
  start = path or os.getcwd()
  for candidate in osutils.IteratePathParents(start):
    try:
      found = Brick(candidate, allow_legacy=False)
    except BrickNotFound:
      continue
    return found
  return None
| {
"content_hash": "77def8ba98afcb64b156822a81edc403",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 80,
"avg_line_length": 33.486988847583646,
"alnum_prop": 0.6465364120781527,
"repo_name": "guorendong/iridium-browser-ubuntu",
"id": "0925688166a8862590ae6e8372dbc195b41e90c5",
"size": "9174",
"binary": false,
"copies": "1",
"ref": "refs/heads/ubuntu/precise",
"path": "third_party/chromite/lib/brick_lib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "256197"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "15445429"
},
{
"name": "C++",
"bytes": "276628399"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "867238"
},
{
"name": "Emacs Lisp",
"bytes": "3348"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "7777"
},
{
"name": "HTML",
"bytes": "20250399"
},
{
"name": "Java",
"bytes": "9950308"
},
{
"name": "JavaScript",
"bytes": "13873772"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "179129"
},
{
"name": "Objective-C",
"bytes": "1871766"
},
{
"name": "Objective-C++",
"bytes": "9674498"
},
{
"name": "PHP",
"bytes": "42038"
},
{
"name": "PLpgSQL",
"bytes": "163248"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "474121"
},
{
"name": "Python",
"bytes": "11646662"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1151673"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
import argparse
import asyncio
import logging
import itertools
import aiosip
# Address the example SIP server binds to.
sip_config = {
    'local_ip': '127.0.0.1',
    'local_port': 6000
}
async def notify(dialog):
    """Send an incrementing NOTIFY payload to the subscriber once a second."""
    counter = 0
    while True:
        counter += 1
        await dialog.notify(payload=str(counter))
        await asyncio.sleep(1)
async def on_subscribe(request, message):
    """Handle an incoming SUBSCRIBE dialog.

    Accepts the subscription, starts a background NOTIFY task, and replies
    to refresh messages until the subscriber sends Expires: 0 to end it.
    """
    expires = int(message.headers['Expires'])
    dialog = await request.prepare(status_code=200,
                                   headers={'Expires': expires})
    if not expires:
        # Expires: 0 on the initial request: nothing to subscribe to.
        return

    print('Subscription started!')
    task = asyncio.ensure_future(notify(dialog))

    async for message in dialog:
        expires = int(message.headers['Expires'])
        await dialog.reply(message, 200, headers={'Expires': expires})
        if expires == 0:
            # Unsubscribe request; stop notifying.
            break

    task.cancel()
    print('Subscription ended!')
class Dialplan(aiosip.BaseDialplan):
    """Routes incoming SUBSCRIBE requests to the on_subscribe handler."""

    async def resolve(self, *args, **kwargs):
        await super().resolve(*args, **kwargs)
        # Only SUBSCRIBE is handled; other methods get no handler.
        if kwargs['method'] == 'SUBSCRIBE':
            return on_subscribe
def start(app, protocol):
    """Bind `app` on the configured address and serve until interrupted."""
    app.loop.run_until_complete(
        app.run(
            protocol=protocol,
            local_addr=(sip_config['local_ip'], sip_config['local_port'])))

    print('Serving on {} {}'.format(
        (sip_config['local_ip'], sip_config['local_port']), protocol))

    try:
        app.loop.run_forever()
    except KeyboardInterrupt:
        pass

    # Graceful shutdown of the application before returning.
    print('Closing')
    app.loop.run_until_complete(app.close())
def main():
    """Parse command line options and launch the example SIP server."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--protocol', default='udp')
    args = parser.parse_args()

    loop = asyncio.get_event_loop()
    app = aiosip.Application(loop=loop, dialplan=Dialplan())

    # Map protocol names to aiosip transport classes.
    transports = {'udp': aiosip.UDP, 'tcp': aiosip.TCP, 'ws': aiosip.WS}
    try:
        transport = transports[args.protocol]
    except KeyError:
        raise RuntimeError("Unsupported protocol: {}".format(args.protocol))

    start(app, transport)
    loop.close()
if __name__ == '__main__':
    # Debug logging shows the raw SIP traffic while the example runs.
    logging.basicConfig(level=logging.DEBUG)
    main()
| {
"content_hash": "3cfee866ffa32754ce545a5ce68f21b3",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 76,
"avg_line_length": 23.788888888888888,
"alnum_prop": 0.6090611863615133,
"repo_name": "Eyepea/aiosip",
"id": "2b5e484215a683487d6f531e3c94b40ebf589d87",
"size": "2141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/subscribe/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1617"
},
{
"name": "Python",
"bytes": "109625"
}
],
"symlink_target": ""
} |
def how_user_handles_food(user):
    """Describe how a user handles food.

    Returns "vigilance" when the user's handles_food_with_vigilance
    attribute is truthy, "probably a sucker." when the attribute is missing
    entirely, and None when it exists but is falsy.
    """
    _missing = object()
    vigilance = getattr(user, 'handles_food_with_vigilance', _missing)
    if vigilance is _missing:
        return "probably a sucker."
    if vigilance:
        return "vigilance"
    return None
"content_hash": "270d13a900ef99739387b03e76569b60",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 44,
"avg_line_length": 30,
"alnum_prop": 0.6333333333333333,
"repo_name": "SlashRoot/WHAT",
"id": "f7f8777c4120d6e87e5b2115ca5195a0e9c8fe05",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "what_apps/presence/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91763"
},
{
"name": "CoffeeScript",
"bytes": "1746"
},
{
"name": "JavaScript",
"bytes": "486131"
},
{
"name": "Python",
"bytes": "707045"
}
],
"symlink_target": ""
} |
"""
`Unit tests for cargo.logic.NetworkingLogic`
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
2016 Jared Lunde © The MIT License (MIT)
http://github.com/jaredlunde
"""
from cargo.logic import NetworkingLogic
from unit_tests import configure
from unit_tests.configure import new_field
class TestNetworkingLogic(configure.LogicTestCase):
    """Unit tests for cargo.logic.NetworkingLogic."""

    def setUp(self):
        # Fixture: an 'ip' field bound to table 'foo', column 'bar'.
        self.base = new_field('ip')
        self.base.field_name = 'bar'
        self.base.table = 'foo'
if __name__ == '__main__':
    # Unit test: run directly with failfast and verbose output.
    configure.run_tests(TestNetworkingLogic, failfast=True, verbosity=2)
| {
"content_hash": "c43677b7e2f55659c0ef06d211729692",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 27.304347826086957,
"alnum_prop": 0.6003184713375797,
"repo_name": "jaredlunde/cargo-orm",
"id": "dbe59721111462eaf96bf4849702ecb741623c3c",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "unit_tests/logic/NetworkingLogic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1155740"
},
{
"name": "Shell",
"bytes": "288"
}
],
"symlink_target": ""
} |
"""
epitopepredict analysis methods
Created September 2013
Copyright (C) Damien Farrell
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import absolute_import, print_function
import sys, os, shutil, string, types
import csv, glob, pickle, itertools
import math
import re
import time, random
from collections import OrderedDict
from operator import itemgetter
import numpy as np
import pandas as pd
import subprocess
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from . import base, sequtils, tepitope, utilities, peptutils
home = os.path.expanduser("~")
#fix paths!
# NOTE(review): hard-coded locations under $HOME look machine-specific.
genomespath = os.path.join(home, 'epitopedata')
datadir = os.path.join(home, 'testpredictions')
def get_AAcontent(df, colname, amino_acids=None):
    """Amino acid composition for dataframe with sequences.

    Applies peptutils.get_AAfraction to the `colname` value of each row;
    `amino_acids` optionally restricts the composition to a residue subset.
    """
    def row_fraction(row):
        return peptutils.get_AAfraction(str(row[colname]), amino_acids)
    return df.apply(row_fraction, 1)
def net_charge(df, colname):
    """Net peptide charge for dataframe with sequences.

    Applies peptutils.net_charge to the `colname` value of each row.
    """
    def row_charge(row):
        return peptutils.net_charge(row[colname])
    return df.apply(row_charge, 1)
def isoelectric_point(df):
    """Isoelectric point for each 'peptide' in the dataframe.

    BUG FIX: ProteinAnalysis was used without ever being imported at module
    level, so calling this raised NameError; import it locally here.
    """
    from Bio.SeqUtils.ProtParam import ProteinAnalysis
    def getpi(seq):
        X = ProteinAnalysis(seq)
        return X.isoelectric_point()
    return df.apply( lambda r: getpi(r.peptide),1)
def peptide_properties(df, colname='peptide'):
    """Find hydrophobicity ('hydro') and net charge for peptides.

    Adds both columns to `df` in place and returns it.
    """
    for column, compute in (('hydro', get_AAcontent),
                            ('net_charge', net_charge)):
        df[column] = compute(df, colname)
    return df
def _center_nmer(x, n):
    """Get n-mer sequence for a peptide centered in the middle.
    This should be applied to a dataframe per row.

    Args:
        x: row with 'translation', 'start' and 'end' fields
        n: length of the window to return

    Returns: a single sequence centred on the peptide
    """
    seq = x['translation']
    size = x.end-x.start
    # Half the size difference; negative when the peptide is shorter than n.
    l = int((size-n)/2.0)
    if size>n:
        # Shrink: trim l from the end and l (l+1 for odd differences) from
        # the start.
        if size%2 == 1: l1 = l+1
        else: l1=l
        start = x.start+l1
        end = x.end-l
    elif size<=n:
        # Grow: l is <= 0 here, so the same arithmetic widens the window.
        if size%2 == 1: l1 = l-1
        else: l1=l
        start = x.start+l1
        end = x.end-l
    if start<=0:
        # Window ran off the left edge; shift it right to stay in range.
        d=1-start
        start = start+d
        end = end+d
    seq = seq[start:end]
    #print(size, x.peptide, x.start, x.end, l, l1, start, end, seq, len(seq))
    return seq
def _split_nmer(x, n, key, margin=3, colname='peptide'):
    """Row based method to split a peptide in to multiple n-mers
    if it's too large. Returns a dataframe of 3 cols so should be
    applied using iterrows and then use concat.

    Args:
        x: row item
        n: length to split on
        key: column of x holding the parent sequence
        margin: sequences up to n+margin long are centred, not split
        colname: name of the output peptide column
    """
    size = x.end-x.start
    m = margin
    if size <= n+m:
        # Short enough: return a single centred n-mer instead of splitting.
        seq = _center_nmer(x, n)
        return pd.DataFrame({colname: seq},index=[0])
    else:
        seq = x[key]
        # Leftover length after dividing into n-sized windows.
        o=size%n
        #print (size, o)
        if o<=margin:
            # Small leftover: shrink to a multiple of n via centring first.
            size=size-o
            seq = _center_nmer(x, size)
            #print (size)
        seqs=[]
        seq = x[key][x.start:x.end]
        if x.start==0: s=1
        else: s=0
        for i in range(s, size, n):
            if i+n>size:
                # Final window would overrun; take the n-mer at offset o.
                seqs.append(seq[o:o+n])
                #print (x.name,seq[x.start:x.start+n])
            else:
                seqs.append(seq[i:i+n])
                #print (seq[i:i+n])
        seqs = pd.Series(seqs)
        d = pd.DataFrame({colname:seqs})
        return d
def create_nmers(df, genome, length=20, seqkey='translation', key='nmer', how='split', margin=0):
    """
    Get n-mer peptide surrounding a set of sequences using the host
    protein sequence.

    Args:
        df: input dataframe with sequence name and start/end coordinates
        genome: genome dataframe with host sequences
        length: length of nmer to return
        seqkey: column name of sequence to be processed
        key: column name for the generated n-mer
        how: method to create the n-mer, 'split' will try to split up
            the sequence into overlapping n-mes of length is larger than size
        margin: do not split sequences below length+margin

    Returns:
        pandas Series with nmer values
    """
    cols = ['locus_tag','gene','translation']
    cols = list(set(cols) & set(genome.columns))
    #merge with genome dataframe but must keep index for re-merging
    if len(df)==0:
        return
    temp = df.merge(genome[cols],left_on='name',right_on='locus_tag',
                    how='left')#.set_index(df.index)
    #print (temp)
    if not 'end' in list(temp.columns):
        temp = base.get_coords(temp)
    #temp = base.get_coords(temp)
    if how == 'center':
        # One centred n-mer per row.
        temp[key] = temp.apply( lambda r: _center_nmer(r, length), 1)
        res = temp
    elif how == 'split':
        # Possibly several n-mers per row; remember the source row index
        # so the pieces can be merged back onto their original rows.
        res=[]
        for n,r in temp.iterrows():
            d = _split_nmer(r, length, seqkey, margin, key)
            d['index']=n
            d.set_index('index',inplace=True)
            res.append(d)
        res = pd.concat(res)
        #print (res)
        res = temp.merge(res,left_index=True,right_index=True,how='right').reset_index(drop=True)
        #print (res)
    res=res.drop([seqkey],1)
    return res
def get_overlaps(df1, df2, label='overlap', how='inside'):
    """
    Overlaps for 2 sets of sequences where the positions in host sequence are stored
    in each dataframe as 'start' and 'end' columns

    Args:
        df1 : first set of sequences, a pandas dataframe with columns called
            start/end or pos
        df2: second set of sequences
        label: label for overlaps column
        how: may be 'any' or 'inside'

    Returns:
        First DataFrame with no. of overlaps stored in a new column
    """
    new=[]
    a = base.get_coords(df1)
    b = base.get_coords(df2)
    def overlap(x,y):
        # Count rows of y overlapping the single interval x.
        f=0
        #print x['name'],x.peptide
        #print (x.start,x.end)
        for i,r in y.iterrows():
            if how == 'inside':
                # x must fully contain r.
                if ((x.start<=r.start) & (x.end>=r.end)):
                    f+=1
            elif how == 'any':
                # Any partial overlap in either direction counts.
                if ((x.start<=r.start) & (x.end>r.start)) or \
                   ((x.start>=r.start) & (x.start<r.end)):
                    #t = abs(r.start-x.start)
                    #print (a, b)
                    f+=1
            #print (r.start,r.end, f)
        return f
    for n,df in a.groupby('name'):
        # Only compare sequences belonging to the same protein.
        found = b[b.name==n]
        df[label] = df.apply(lambda r: overlap(r,found),axis=1)
        new.append(df)
    result = pd.concat(new)
    #print ('%s with overlapping sequences' %len(result[result[label]>0]))
    return result
def get_orthologs(seq, db=None, expect=1, hitlist_size=400, equery=None,
                  email=''):
    """
    Fetch orthologous sequences using remote or local blast and return the records
    as a dataframe.

    Args:
        seq: sequence to blast
        db: the name of a local blast db
        expect: expect value
        hitlist_size: maximum number of hits requested from remote blast
        equery: Entrez Gene Advanced Search options,
            (see http://www.ncbi.nlm.nih.gov/books/NBK3837/)
        email: email address passed to NCBI Entrez

    Returns:
        blast results in a pandas dataframe, or None on failure
    """
    from Bio.Blast import NCBIXML,NCBIWWW
    from Bio import Entrez, SeqIO
    Entrez.email = email
    print ('running blast..')
    if db != None:
        # local blast; note equery/hitlist_size are not used on this path
        SeqIO.write(SeqRecord(Seq(seq)), 'tempseq.faa', "fasta")
        sequtils.local_blast(db, 'tempseq.faa', output='my_blast.xml', maxseqs=100)
        result_handle = open("my_blast.xml")
        df = sequtils.get_blast_results(result_handle)
    else:
        try:
            # BUG FIX: hitlist_size was previously hard-coded to 500 here,
            # silently ignoring the keyword argument.
            result_handle = NCBIWWW.qblast("blastp", "nr", seq, expect=expect,
                                           hitlist_size=hitlist_size,
                                           entrez_query=equery)
            time.sleep(2)
            # Save the raw XML so a failed parse can be inspected/re-read.
            savefile = open("my_blast.xml", "w")
            savefile.write(result_handle.read())
            savefile.close()
            result_handle = open("my_blast.xml")
            df = sequtils.get_blast_results(result_handle, local=False)
        except Exception as e:
            print ('blast timeout')
            return
    # Drop columns not used downstream; remove duplicate and self (100%) hits.
    df = df.drop(['subj','positive','query_length','score'],1)
    df.drop_duplicates(subset=['definition','perc_ident'], inplace=True)
    df = df[df['perc_ident']!=100]
    return df
def alignment_to_dataframe(aln):
    """Convert an alignment (records with .id and .seq) to a dataframe
    with string 'accession' and 'seq' columns."""
    records = [[str(rec.id), str(rec.seq)] for rec in aln]
    return pd.DataFrame(records, columns=['accession', 'seq'])
def align_blast_results(df, aln=None, idkey='accession', productkey='definition'):
    """
    Get gapped alignment from blast results using muscle aligner.

    Args:
        df: blast results dataframe with a 'sequence' column
        aln: unused (overwritten below); kept for backwards compatibility
        idkey: column used as the fasta identifier
        productkey: column used as the fasta description

    Returns:
        (dataframe merged with aligned 'seq' column, the alignment object)
    """
    sequtils.dataframe_to_fasta(df, idkey=idkey, seqkey='sequence',
                                descrkey=productkey, outfile='blast_found.faa')
    aln = sequtils.muscle_alignment("blast_found.faa")
    alnrows = [[a.id,str(a.seq)] for a in aln]
    alndf = pd.DataFrame(alnrows,columns=['accession','seq'])
    #res = df.merge(alndf, left_index=True, right_index=True)
    res = df.merge(alndf, on=['accession'])
    # Drop the unaligned sequence; 'seq' now holds the gapped alignment.
    res = res.drop('sequence',1)
    #get rid of duplicate hits
    #res.drop_duplicates(subset=['definition','seq'], inplace=True)
    res = res.sort_values(by='identity',ascending=False)
    print ('%s hits, %s filtered' %(len(df), len(res)))
    return res, aln
def get_species_name(s):
    """Extract the bracketed [species name] from a blast hit definition.

    Returns the contents of the first "[...]" group, or the input string
    unchanged when no bracketed species is present.
    """
    match = re.search(r"[^[]*\[([^]]*)\]", s)
    return s if match is None else match.group(1)
def find_conserved_sequences(seqs, alnrows):
    """
    Find if sub-sequences are conserved in given set of aligned sequences

    Args:
        seqs: a list of sequences to find
        alnrows: a dataframe of aligned protein sequences

    Returns:
        a pandas DataFrame with 1 where a search sequence is present in a
        protein and NaN where it is absent
    """
    hits = []
    for i, a in alnrows.iterrows():
        sequence = a.seq
        # str.find returns -1 when absent, else the 0-based match position.
        hits.append([sequence.find(j) for j in seqs])
    # Index by the first available identifier column.
    for n in ['species', 'accession', 'name']:
        if n in alnrows.columns:
            ind = alnrows[n]
            break
    s = pd.DataFrame(hits, columns=seqs, index=ind)
    s = s.replace(-1, np.nan)
    # BUG FIX: previously `s[s>0] = 1` left matches at position 0 as 0,
    # making a hit at the start of a sequence look like a non-match.
    s[s >= 0] = 1
    return s
def epitope_conservation(peptides, alnrows=None, proteinseq=None, blastresult=None,
                         blastdb=None, perc_ident=50, equery='srcdb_refseq[Properties]'):
    """
    Find and visualise conserved peptides in a set of aligned sequences.

    Args:
        peptides: a list of peptides/epitopes
        alnrows: a dataframe of previously aligned sequences e.g. custom strains
        proteinseq: a sequence to blast and get an alignment for
        blastresult: a file of saved blast results in plain csv format
        blastdb: optional local blast database name
        perc_ident: minimum percent identity for hits to be kept
        equery: blast query string

    Returns:
        Matrix of conservation for each epitope/protein variant, or None
        when no alignment could be produced or nothing is conserved
    """
    import seaborn as sns
    sns.set_context("notebook", font_scale=1.4)
    if alnrows is None:
        if proteinseq == None:
            print ('protein sequence to blast or alignment required')
            return
        if blastresult == None or not os.path.exists(blastresult):
            # No saved results: run the blast search and cache it.
            # BUG FIX: get_orthologs takes `db=`, not `blastdb=`; the old
            # keyword raised TypeError whenever this path was reached.
            blr = get_orthologs(proteinseq, equery=equery, db=blastdb)
            if blr is None:
                return
            #if filename == None: filename = 'blast_%s.csv' %label
            blr.to_csv(blastresult)
        else:
            # Reuse previously saved blast results.
            blr = pd.read_csv(blastresult, index_col=0)
            #blr = blr[blr.perc_ident>=perc_ident]
        alnrows, aln = align_blast_results(blr)
        #print (sequtils.formatAlignment(aln))
    if 'perc_ident' in alnrows.columns:
        alnrows = alnrows[alnrows.perc_ident>=perc_ident]
    if 'definition' in alnrows.columns:
        alnrows['species'] = alnrows.definition.apply(get_species_name)
    c = find_conserved_sequences(peptides, alnrows).T
    c = c.dropna(how='all')
    # Order epitopes by how many variants they are conserved in.
    c = c.reindex_axis(c.sum(1).sort_values().index)
    if len(c) == 0:
        print ('no conserved epitopes in any sequence')
        return
    return c
def _region_query(P, eps, D):
neighbour_pts = []
for point in D:
if abs(P - point)<eps:
neighbour_pts.append(point)
return neighbour_pts
def _expand_cluster(P, neighbour_pts, C, c_n, eps, min_pts, D, visited):
    """Grow cluster C[c_n] outward from core point P.

    Mutates C, visited and neighbour_pts in place; neighbour_pts is
    deliberately extended while being iterated, which is how the cluster
    frontier expands.
    """
    flatten = lambda l: [i for sublist in l for i in sublist]
    C[c_n].append(P)
    for point in neighbour_pts:
        if point not in visited:
            visited.append(point)
            neighbour_pts_2 = _region_query(point, eps, D)
            if len(neighbour_pts_2) >= min_pts:
                # point is itself a core point; enqueue its neighbourhood.
                neighbour_pts += neighbour_pts_2
            #print (point,C)
            if point not in flatten(C):
                # Not yet a member of any cluster; claim it for this one.
                C[c_n].append(point)
def _dbscan(D, eps=5, minsize=2):
    """
    1D intervals using dbscan. Density-Based Spatial clustering.
    Finds core samples of high density and expands clusters from them.

    Args:
        D: iterable of 1D points
        eps: neighbourhood radius
        minsize: minimum neighbours for a core point and minimum cluster size

    Returns:
        list of clusters, each a list of points
    """
    from numpy.random import rand  # NOTE(review): unused import
    noise = []
    visited = []
    C = []
    c_n = -1
    for point in D:
        visited.append(point)
        neighbour_pts = _region_query(point, eps, D)
        if len(neighbour_pts) < minsize:
            # Too sparse to seed a cluster.
            noise.append(point)
        else:
            # Core point: start a new cluster and expand it.
            C.append([])
            c_n+=1
            _expand_cluster(point, neighbour_pts, C, c_n,eps, minsize, D, visited)
    # Discard clusters that ended up below the minimum size.
    C = [i for i in C if len(i)>=minsize]
    #for cl in C:
    #    print (cl)
    return C
def dbscan(B=None, x=None, dist=7, minsize=4):
    """Use dbscan algorithm to cluster binder positions.

    Either pass a binder dataframe `B` (its 'pos' column is clustered) or an
    explicit sequence of positions `x`. Returns the list of clusters, or
    None for an empty dataframe.
    """
    if B is not None:
        if not len(B):
            return None
        x = sorted(B.pos.astype('int'))
    return _dbscan(x, dist, minsize)
def find_clusters(binders, dist=None, min_binders=2, min_size=12, max_size=50,
                  genome=None, colname='peptide'):
    """
    Get clusters of binders for a set of binders.

    Args:
        binders: dataframe of binders
        dist: distance over which to apply clustering
        min_binders : minimum binders to be considered a cluster
        min_size: smallest cluster length to return
        max_size: largest cluster length to return
        genome: optional genome dataframe used to extract cluster sequences
        colname: name for cluster sequence column

    Returns:
        a pandas Series with the new n-mers (may be longer than the initial dataframe
        if splitting)
    """
    C=[]
    grps = list(binders.groupby('name'))
    # Peptide length taken from the first binder row.
    length = binders.head(1).peptide.str.len().max()
    #print (length)
    if dist == None:
        # Default: cluster binders within one peptide length of each other.
        dist = length+1
        #print ('using dist for clusters: %s' %dist)
    for n,b in grps:
        if len(b)==0: continue
        clusts = dbscan(b,dist=dist,minsize=min_binders)
        if len(clusts) == 0:
            continue
        for c in clusts:
            # gaps currently unused.
            gaps = [c[i]-c[i-1] for i in range(1,len(c))]
            C.append([n,min(c),max(c)+length,len(c)])
    if len(C)==0:
        print ('no clusters')
        return pd.DataFrame()
    x = pd.DataFrame(C,columns=['name','start','end','binders'])
    x['length'] = (x.end-x.start)
    # Keep clusters within the requested size window.
    x = x[x['length']>=min_size]
    x = x[x['length']<=max_size]
    x=x.sort_values(['binders','length'],ascending=False)
    if genome is not None:
        # Pull the actual cluster sequence out of the host protein.
        cols = ['locus_tag','translation']
        if 'gene' in genome.columns:
            cols.append('gene')
        x = x.merge(genome[cols],
                    left_on='name',right_on='locus_tag')
        x[colname] = x.apply(lambda r: r.translation[r.start:r.end], 1)
        x = x.drop(['locus_tag','translation'],1)
        x = x.drop_duplicates(colname)
        x = x.sort_values(by=['binders'],ascending=False)
        x = x.reset_index(drop=True)
    #print ('%s clusters found in %s proteins' %(len(x),len(x.groupby('name'))))
    #print
    return x
def randomize_dataframe(df, seed=8):
    """Return a copy of df with its rows shuffled; deterministic per seed."""
    np.random.seed(seed=seed)
    shuffled = df.reset_index(drop=True)
    return shuffled.reindex(np.random.permutation(shuffled.index))
def save_to_excel(df, n=94, filename='peptide_lists'):
    """
    Save a dataframe to an excel workbook, splitting the rows into
    consecutive sheets of at most n rows each ('list1', 'list2', ...).
    """
    writer = pd.ExcelWriter('%s.xls' %filename)
    # integer-divide the row positions to form fixed-size chunks
    chunks = df.groupby(np.arange(len(df)) // n)
    for sheet_no, (g, chunk) in enumerate(chunks, start=1):
        chunk.to_excel(writer, 'list'+str(sheet_no))
    writer.save()
    return
def tmhmm(fastafile=None, infile=None):
    """
    Get TMhmm transmembrane-helix predictions.

    Args:
        fastafile: fasta input file to run tmhmm on (used when infile is None)
        infile: text file with an existing tmhmm prediction output
    Returns:
        dataframe of tmhmm predictions
    """
    if infile is None:
        # Run the tmhmm binary; its stdout is redirected into a temp file,
        # so check_output's return value is empty — the file name is what
        # must be parsed. (The original assigned the empty stdout to
        # `infile`, which broke read_csv.)
        tempfile = 'tmhmm_temp.txt'
        cmd = 'tmhmm %s > %s' %(fastafile,tempfile)
        subprocess.check_output(cmd, shell=True, executable='/bin/bash')
        infile = tempfile
    tmpred = pd.read_csv(infile, delim_whitespace=True, comment='#',
                         names=['locus_tag','v','status','start','end'])
    tmpred = tmpred.dropna()
    print ('tmhmm predictions for %s proteins' %len(tmpred.groupby('locus_tag')))
    return tmpred
def signalP(infile=None,genome=None):
    """Get signal peptide predictions.

    Args:
        infile: existing signalp short-format output file to parse
        genome: genbank file; if given, signalp is run on its proteins and
            the generated output file is parsed instead of infile
    Returns:
        dataframe of signalp predictions
    """
    if genome is not None:
        seqfile = Genome.genbank2Fasta(genome)
        tempfile = 'signalp_temp.txt'
        cmd = 'signalp -t gram+ -f short %s > %s' %(seqfile,tempfile)
        # Output is redirected into tempfile; parse that file below. The
        # original ran this command unconditionally (NameError on seqfile
        # when genome was None) and overwrote infile with empty stdout.
        subprocess.check_output(cmd, shell=True, executable='/bin/bash')
        infile = tempfile
    sp = pd.read_csv(infile,delim_whitespace=True,comment='#',skiprows=2,
                      names=['locus_tag','Cmax','cpos','Ymax','ypos','Smax',
                             'spos','Smean','D','SP','Dmaxcut','net'])
    return sp
def get_seqdepot(seq):
    """Fetch the seqdepot annotation for a protein sequence.

    Args:
        seq: protein sequence string
    Returns:
        the seqdepot result, or None if the lookup fails
    """
    from epitopepredict import seqdepot
    # the original called reload(seqdepot) here; reload is not a builtin
    # on Python 3 and reloading a freshly imported module is redundant
    sd = seqdepot.new()
    aseqid = sd.aseqIdFromSequence(seq)
    try:
        result = sd.findOne(aseqid)
    except Exception as e:
        print (e)
        result = None
    return result
def prediction_coverage(expdata, binders, key='sequence', perc=50, verbose=False):
    """
    Determine hit rate of predictions in experimental data
    by finding how many top peptides are needed to cover % positives.

    Args:
        expdata: dataframe of experimental data with peptide sequence and name column
        binders: dataframe of ranked binders created from predictor
        key: column name in expdata for sequence
        perc: percentage of experimental positives that must be covered
        verbose: print per-sequence matching progress
    Returns:
        percentage (rounded to 2 decimals) of predicted binders consumed
        to reach perc of the total response, summed across all sequences
    """
    def getcoverage(data, peptides, key):
        # count how many ranked peptides must be consumed before `perc` %
        # of the experimental peptides of one sequence are matched
        target = math.ceil(len(data)*perc/100.0)
        if verbose == True:
            print (len(data), target)
        found=[]
        count=0
        for p in peptides:
            for i,r in data.iterrows():
                # skip experimental peptides that were already matched
                if r[key] in found:
                    continue
                # a match is a substring hit in either direction
                if r[key].find(p)!=-1 or p.find(r[key])!=-1:
                    found.append(r[key])
                    if verbose == True:
                        print (count, p, r[key])
                    continue
            count+=1
            if len(found) >= target:
                if verbose == True:
                    print (count, target)
                    print ('--------------')
                return count
        # ran out of predicted peptides before reaching the target
        if verbose == True:
            print ('not all sequences found', count, target)
        return count
    total = 0
    for name, data in expdata.groupby('name'):
        peptides = binders[binders.name==name].peptide
        if len(peptides) == 0:
            continue
        if verbose == True: print (name)
        c = getcoverage(data, peptides, key)
        total += c
    return round(total/float(len(binders))*100,2)
def test_features():
    """Test genbank feature handling: load a genome, print one protein
    record and look up its neighbouring locus tags."""
    fname = os.path.join(datadir,'MTB-H37Rv.gb')
    df = sequtils.genbank2Dataframe(fname, cds=True)
    df = df.set_index('locus_tag')
    keys = df.index
    name = 'Rv0011c'
    # .loc replaces the long-removed .ix indexer
    row = df.loc[name]
    seq = row.translation
    prod = row['product']
    rec = SeqRecord(Seq(seq), id=name, description=prod)
    fastafmt = rec.format("fasta")
    print (fastafmt)
    print (row.to_dict())
    ind = keys.get_loc(name)
    # neighbouring locus tags; renamed to avoid shadowing the builtin next()
    previous_key = keys[ind-1]
    if ind < len(keys)-1:
        next_key = keys[ind+1]
    else:
        next_key = None
    return
def testrun(gname):
    """Run a small end-to-end epitope prediction for one protein.

    Args:
        gname: basename of a genbank file located in ``genomespath``
    """
    method = 'tepitope'#'iedbmhc1'#'netmhciipan'
    path='test'
    gfile = os.path.join(genomespath,'%s.gb' %gname)
    df = sequtils.genbank2Dataframe(gfile, cds=True)
    # predict only a single protein to keep the run short
    names=['VP24']
    # class I and class II allele sets; only alleles2 is used below
    alleles1 = ["HLA-A*02:02", "HLA-A*11:01", "HLA-A*32:07", "HLA-B*15:17", "HLA-B*51:01",
                "HLA-C*04:01", "HLA-E*01:03"]
    alleles2 = ["HLA-DRB1*0101", "HLA-DRB1*0305", "HLA-DRB1*0812", "HLA-DRB1*1196", "HLA-DRB1*1346",
                "HLA-DRB1*1455", "HLA-DRB1*1457", "HLA-DRB1*1612", "HLA-DRB4*0107", "HLA-DRB5*0203"]
    P = base.getPredictor(method)
    P.iedbmethod='IEDB_recommended' #'netmhcpan'
    P.predictProteins(df,length=11,alleles=alleles2,names=names,
                      save=True, path=path)
    f = os.path.join('test', names[0]+'.mpk')
    # NOTE(review): pd.read_msgpack was removed in pandas 1.0 — this call
    # needs an older pandas; confirm the pinned version before running
    df = pd.read_msgpack(f)
    P.data=df
    base.getScoreDistributions(method, path)
    return
def test_conservation(label,gname):
    """Conservation analysis of one protein across blast orthologs.

    Args:
        label: run label (currently unused in the body)
        gname: basename of a genbank file located in ``genomespath``
    """
    tag='VP24'
    pd.set_option('max_colwidth', 800)
    gfile = os.path.join(genomespath,'%s.gb' %gname)
    g = sequtils.genbank2Dataframe(gfile, cds=True)
    res = g[g['locus_tag']==tag]
    seq = res.translation.head(1).squeeze()
    print (seq)
    # orthologs were fetched once and cached in blast_<tag>.csv
    alnrows = pd.read_csv('blast_%s.csv' %tag,index_col=0)
    alnrows.drop_duplicates(subset=['accession'], inplace=True)
    # keep only reasonably similar orthologs
    alnrows = alnrows[alnrows['perc_ident']>=60]
    seqs=[SeqRecord(Seq(a.sequence),a.accession) for i,a in alnrows.iterrows()]
    print (seqs[:2])
    sequtils.distanceTree(seqs=seqs)#,ref=seqs[0])
    '''method='tepitope'
    P = base.getPredictor(method)
    P.predictSequences(df,seqkey='sequence')
    b = P.get_binders()'''
    return
def find_conserved_peptide(peptide, recs):
    """Find in which aligned sequences a peptide occurs.

    Args:
        peptide: peptide string to search for
        recs: dataframe with 'sequence' (may contain '-' gap chars) and
            'accession' columns
    Returns:
        dataframe indexed by accession with a 'found' column holding the
        0-based position of the peptide, or NaN when absent
    """
    positions = []
    for i, a in recs.iterrows():
        # strip alignment gaps before searching
        seq = a.sequence.replace('-','')
        positions.append(seq.find(peptide))
    s = pd.DataFrame(positions, columns=['found'], index=recs.accession)
    # str.find returns -1 for "not found"; represent that as NaN
    s = s.replace(-1, np.nan)
    # (the original also computed an unused s.count() — removed)
    return s
def test():
    """Entry point for the ad-hoc test run against the ebolavirus genome."""
    genome = 'ebolavirus'
    label = 'test'
    testrun(genome)
    return
if __name__ == '__main__':
    # widen pandas console output for readability, then run the smoke test
    pd.set_option('display.width', 600)
    test()
| {
"content_hash": "0b914699cd22d6b1aecc1b3e00da449e",
"timestamp": "",
"source": "github",
"line_count": 705,
"max_line_length": 100,
"avg_line_length": 33.55744680851064,
"alnum_prop": 0.5958238228083523,
"repo_name": "dmnfarrell/mhcpredict",
"id": "2a785c2c07491dec7b767b85373258fc9c2ed1c0",
"size": "23681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epitopepredict/analysis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "26344406"
},
{
"name": "Python",
"bytes": "110407"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional 'abstract' text field to the Article model."""

    dependencies = [
        ('article', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='abstract',
            # nullable and blank: existing articles keep no abstract
            field=models.TextField(blank=True, null=True),
        ),
    ]
| {
"content_hash": "7724e086f3fc000a84592e79c7974c47",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 20.88888888888889,
"alnum_prop": 0.5877659574468085,
"repo_name": "YoungsonZhao/Blog-Django",
"id": "67f85d8ef3efe7df3d311d5f52ee24bad30e61c2",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "article/migrations/0002_article_abstract.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "121150"
},
{
"name": "CSS",
"bytes": "490819"
},
{
"name": "HTML",
"bytes": "26719"
},
{
"name": "JavaScript",
"bytes": "662736"
},
{
"name": "Python",
"bytes": "18711"
}
],
"symlink_target": ""
} |
import sys

# "Threes" game: repeatedly nudge the number onto a multiple of 3 and
# divide by 3, printing each step, until 1 is reached.

# Read N from the command line if given, otherwise prompt for it.
N = int(sys.argv[1]) if len(sys.argv) > 1 else int(input("N? "))

current = N
while current != 1:
    # Pick the adjustment that makes `current` divisible by 3:
    # remainder 0 -> 0, remainder 1 -> -1, remainder 2 -> +1.
    remainder = current % 3
    if remainder == 0:
        move = 0
    elif remainder == 1:
        move = -1
    else:
        move = 1
    # Show this step, then play it.
    print(current, move)
    current = (current + move) // 3
# Print the final 1.
print(current)
| {
"content_hash": "cae51b0ef4b4bc46e3260440ea29c410",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 55,
"avg_line_length": 20.379310344827587,
"alnum_prop": 0.5482233502538071,
"repo_name": "fsufitch/dailyprogrammer",
"id": "e95a02e247e7770db9751747be8cdff80ffa44e0",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ideas/threes/easy_solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "274"
},
{
"name": "Go",
"bytes": "55952"
},
{
"name": "HTML",
"bytes": "1759"
},
{
"name": "JavaScript",
"bytes": "1806"
},
{
"name": "PHP",
"bytes": "1006"
},
{
"name": "Python",
"bytes": "87430"
},
{
"name": "TypeScript",
"bytes": "1527"
}
],
"symlink_target": ""
} |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    """Return the test-path spec: initial VM formation, checking point and
    the ordered list of TestActions (VM/volume/snapshot/backup operations)
    the woodpecker runner should execute."""
    return dict(initial_formation="template5", checking_point=8, path_list=[
        [TestAction.create_vm, 'vm1', ],
        [TestAction.create_volume, 'volume1', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume1'],
        [TestAction.create_volume, 'volume2', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume2'],
        [TestAction.create_volume, 'volume3', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume3'],
        [TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot1'],
        [TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot2'],
        [TestAction.create_volume_snapshot, 'volume2', 'volume2-snapshot3'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot4'],
        [TestAction.create_volume_snapshot, 'volume3', 'volume3-snapshot8'],
        [TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot9'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot10'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot14'],
        [TestAction.stop_vm, 'vm1'],
        [TestAction.use_volume_snapshot, 'vm1-snapshot4'],
        [TestAction.start_vm, 'vm1'],
        [TestAction.stop_vm, 'vm1'],
        [TestAction.use_volume_snapshot, 'volume3-snapshot4'],
        [TestAction.start_vm, 'vm1'],
        [TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
        [TestAction.batch_delete_snapshots, ['vm1-snapshot4','volume2-snapshot3',]],
        [TestAction.batch_delete_snapshots, ['volume3-snapshot4','vm1-root-snapshot1',]],
        [TestAction.batch_delete_snapshots, ['vm1-snapshot10','volume2-snapshot4',]],
    ])
'''
The final status:
Running:['vm1']
Stopped:[]
Enadbled:['vm1-root-snapshot2', 'volume1-snapshot4', 'volume3-snapshot8', 'vm1-root-snapshot9', 'volume1-snapshot10', 'volume2-snapshot10', 'volume3-snapshot10', 'vm1-snapshot14', 'volume1-snapshot14', 'volume2-snapshot14', 'volume3-snapshot14', 'vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:['vm1-snapshot4', 'volume2-snapshot3', 'volume3-snapshot4', 'vm1-root-snapshot1', 'vm1-snapshot10', 'volume2-snapshot4']
Expunged:[]
Ha:[]
Group:
vm_snap3:['vm1-snapshot14', 'volume1-snapshot14', 'volume2-snapshot14', 'volume3-snapshot14']---vm1volume1_volume2_volume3
vm_backup1:['vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1']---vm1_volume1_volume2_volume3
'''
| {
"content_hash": "01e0ea2d7edeaefe28b931fd514bf3b4",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 317,
"avg_line_length": 49.42857142857143,
"alnum_prop": 0.7155243600330305,
"repo_name": "zstackio/zstack-woodpecker",
"id": "ced206296988a55d8ac5d3fd86e089c96efbeabc",
"size": "2422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/multihosts/vm_snapshots/paths/path22.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
"""
Class to extract information from OpenWRT devices
"""
__all__ = ['OpenWRT']
from netengine.backends.ssh import SSH
import json
class OpenWRT(SSH):
    """
    OpenWRT SSH backend.

    Extracts device information (OS, model, interfaces, wireless status,
    uptime, ...) from an OpenWRT device over the SSH session provided by
    the ``SSH`` base backend.
    """
    # class-level caches; the properties below shadow them with instance
    # attributes on first population, so instances do not share loaded data
    _ubus_dict = {}
    _iwinfo_dict = {}

    def __str__(self):
        """Print a human readable object description."""
        return u"<SSH (OpenWRT): %s@%s>" % (self.username, self.host)

    @property
    def name(self):
        """Device hostname, taken from the second field of ``uname -a``."""
        return self.run('uname -a').split(' ')[1]

    @property
    def os(self):
        """Return os name and version as a (name, version) tuple,
        parsed from /etc/openwrt_release."""
        # cache command output
        output = self.run('cat /etc/openwrt_release')
        info = {}
        # each line looks like: DISTRIB_ID="OpenWrt"
        for line in output.split('\n'):
            # tidy up before filling the dictionary
            key, value = line.split('=')
            key = key.replace('DISTRIB_', '').lower()
            value = value.replace('"', '')
            info[key] = value
        os_name = info['id']
        version = info['release']
        if info.get('description'):
            if info.get('revision'):
                additional_info = "%(description)s, %(revision)s" % info
            else:
                additional_info = "%(description)s" % info
            # remove redundant OpenWRT occurrence
            additional_info = additional_info.replace('OpenWrt ', '')
            version = "%s (%s)" % (version, additional_info)
        return (os_name, version)

    @property
    def ubus_dict(self):
        """Network device status from ubus, fetched once then cached."""
        if not self._ubus_dict:
            self._ubus_dict = json.loads(self.run('ubus call network.device status'))
        return self._ubus_dict

    @property
    def _ubus_interface_infos(self):
        """Return a list of dicts with infos about the interfaces."""
        # renamed from `list` to avoid shadowing the builtin
        infos = []
        for interface in self.run('ubus list').split():
            if "network.interface." in interface:
                infos.append(json.loads(self.run('ubus call %s status' % interface)))
        return infos

    @property
    def interfaces_to_dict(self):
        """Merge per-interface ubus status into the device status dict,
        keyed by each interface's l3_device name."""
        for interface in self._ubus_interface_infos:
            # dict.items() works on Python 2 and 3; the original used the
            # Python-2-only iteritems(), which raised on Python 3
            for key, values in interface.items():
                self.ubus_dict[interface["l3_device"]][str(key)] = values
        return self.ubus_dict

    @property
    def model(self):
        """Device model name, eg: Nanostation M5, Rocket M5."""
        output = self.run('iwinfo | grep -i hardware')
        # will return something like
        # Hardware: 168C:002A 0777:E805 [Ubiquiti Bullet M5]
        # and we'll extract only the string between square brackets
        try:
            return output.split('[')[1].replace(']', '')
        except IndexError:
            return None

    @property
    def wireless_mode(self):
        """Wireless mode: "ap" when iwconfig reports Master, else "sta"."""
        output = self.run("iwconfig 2>/dev/null | grep Mode | awk '{print $4}' | awk -F ':' '{print $2}'")
        output = output.strip()
        if output == "Master":
            return "ap"
        else:
            return "sta"

    @property
    def RAM_total(self):
        """Total RAM in kB as reported by /proc/meminfo."""
        return int(self.run("cat /proc/meminfo | grep MemTotal | awk '{print $2}'"))

    @property
    def uptime(self):
        """Return an integer representing the number of seconds of uptime."""
        output = self.run('cat /proc/uptime')
        seconds = float(output.split()[0])
        return int(seconds)

    @property
    def manufacturer(self):
        """Return a string representing the device manufacturer, looked up
        from the MAC address of eth0 or, failing that, of the first
        non-loopback interface that resolves."""
        # try to determine eth0 macaddress if it exists
        if 'eth0' in self.ubus_dict:
            mac_address = self.ubus_dict['eth0']['macaddr']
            manufacturer = self.get_manufacturer(mac_address)
            if manufacturer:
                return manufacturer
        # eth0 doesn't exist or didn't resolve: try the other interfaces
        for interface in self.ubus_dict:
            # ignore loopback interface
            if interface != "lo":
                mac_address = self.ubus_dict[interface]['macaddr']
                manufacturer = self.get_manufacturer(mac_address)
                if manufacturer:
                    return manufacturer

    @property
    def uptime_tuple(self):
        """Return uptime as a (days, hours, minutes) tuple."""
        uptime = float(self.run('cat /proc/uptime').split()[0])
        seconds = int(uptime)
        minutes = int(seconds // 60)
        hours = int(minutes // 60)
        days = int(hours // 24)
        return days, hours, minutes

    def _filter_radio_interfaces(self):
        """Return information about wireless stations as per
        ``iw wlan0 station dump``, parsed into a flat dict."""
        iwinfo_result = self.run('iw wlan0 station dump')
        dictionary = {}
        # output is tab separated: station id, then key/value pairs
        result = iwinfo_result.split("\t")
        dictionary["Station"] = result[0].strip()
        keys = result[1::2]
        values = result[2::2]
        try:
            for i in range(0, len(keys)):
                dictionary[keys[i].strip()] = str(values[i].strip())
        except Exception:
            # best effort: ignore trailing keys without a matching value
            pass
        return dictionary

    def _filter_radio(self):
        """Return a dictionary with the information extracted from
        ``iwinfo wlan0 info``; empty when no wlan0 interface exists."""
        dictionary = {}
        # in case there is no wireless interface
        if "wlan0" not in self.ubus_dict:
            return dictionary
        iwinfo_result = self.run('iwinfo wlan0 info')
        lines = iwinfo_result.split("\n")
        # first line holds the ESSID; cut off everything before it
        char_occurrence = lines[0].find("ESSID")
        first_line = lines[0][char_occurrence:]
        key = first_line.split(":")[0].lower()
        value = first_line.split(":")[1].strip().replace(" ", "-")
        value = value.replace('"', "")
        dictionary[key] = value
        for line in lines[1:]:
            if line.count(": ") == 2:
                # two key/value pairs on one line
                partial = line.strip().split("  ")
                for element in partial:
                    key = element.split(":")[0].lower().replace(" ", "-")
                    value = element.split(":")[1].strip()
                    # strip parentheses from keys; the original tested
                    # `"(" and ")" in key` which only checked for ")"
                    if "(" in key and ")" in key:
                        key = key.replace("(", "").replace(")", "")
                    dictionary[key] = value
            else:
                key = line.split(":")[0].strip().lower().replace(" ", "-")
                value = line.split(":")[1].strip()
                if "(" in key and ")" in key:
                    key = key.replace("(", "").replace(")", "")
                dictionary[key] = value
        return dictionary

    def _filter_routing_protocols(self):
        """Return the list of detected routing protocols (currently olsr)."""
        results = []
        olsr = self.olsr
        if olsr:
            results.append(self._dict({
                "name" : "olsr",
                "version" : olsr[0]
            }))
        return results

    def to_dict(self):
        """Aggregate all device information into a single dict."""
        return self._dict({
            "name": self.name,
            "type": "radio",
            "os": self.os[0],
            "os_version": self.os[1],
            "manufacturer": self.manufacturer,
            "model": self.model,
            "RAM_total": self.RAM_total,
            "uptime": self.uptime,
            "uptime_tuple": self.uptime_tuple,
            "interfaces": self.interfaces_to_dict,
            "antennas": [],
            "routing_protocols": self._filter_routing_protocols()
        })
| {
"content_hash": "e5034ce1574ac8751c063a2a57e0d4f7",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 106,
"avg_line_length": 33.17672413793103,
"alnum_prop": 0.5169546576588281,
"repo_name": "ninuxorg/netengine",
"id": "0799ed35645c9b76630a1a62e4a0861adba31ed4",
"size": "7697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netengine/backends/ssh/openwrt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6783"
},
{
"name": "Python",
"bytes": "901096"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the indexed 'new' boolean flag to the Event model."""

    dependencies = [
        ('server', '0011_auto_20171101_1255'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='new',
            # indexed flag defaulting to False for existing events
            field=models.BooleanField(db_index=True, default=False, verbose_name='Markierung für Neue Veranstaltungen'),
        )
    ]
| {
"content_hash": "de2f3281898062e6dc8b3408a32b2b70",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 120,
"avg_line_length": 24.894736842105264,
"alnum_prop": 0.638477801268499,
"repo_name": "wodo/WebTool3",
"id": "d27692921027036c972f4f9e2616449870af6a4d",
"size": "547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webtool/server/migrations/0012_auto_20171109_0556.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "80"
},
{
"name": "HTML",
"bytes": "40995"
},
{
"name": "JavaScript",
"bytes": "1773"
},
{
"name": "Python",
"bytes": "346936"
},
{
"name": "TeX",
"bytes": "5006"
},
{
"name": "TypeScript",
"bytes": "158400"
}
],
"symlink_target": ""
} |
# API metadata parsing failed.
FAILEDOPERATION_APIMETAPARSEFAILED = 'FailedOperation.ApiMetaParseFailed'
# Failed to obtain ES auth info while creating the application.
FAILEDOPERATION_APPLICATIONCREATEESATUHERROR = 'FailedOperation.ApplicationCreateEsAtuhError'
# Application query failed.
FAILEDOPERATION_APPLICATIONQUERYFAILED = 'FailedOperation.ApplicationQueryFailed'
# Failed to enable VPC network permission while creating the cluster.
FAILEDOPERATION_CLUSTERCREATEVPCFAIL = 'FailedOperation.ClusterCreateVpcFail'
# Cluster query failed.
FAILEDOPERATION_CLUSTERQUERYFAILED = 'FailedOperation.ClusterQueryFailed'
# Application query failed (config module).
FAILEDOPERATION_CONFIGAPPLICATIONQUERYFAILED = 'FailedOperation.ConfigApplicationQueryFailed'
# Config item creation failed.
FAILEDOPERATION_CONFIGCREATEFAILED = 'FailedOperation.ConfigCreateFailed'
# Deployment group query failed (config module).
FAILEDOPERATION_CONFIGGROUPQUERYFAILED = 'FailedOperation.ConfigGroupQueryFailed'
# Namespace query failed (config module).
FAILEDOPERATION_CONFIGNAMESPACEQUERYFAILED = 'FailedOperation.ConfigNamespaceQueryFailed'
# Config item query failed.
FAILEDOPERATION_CONFIGQUERYFAILED = 'FailedOperation.ConfigQueryFailed'
# Config release info query failed.
FAILEDOPERATION_CONFIGRELEASEQUERYFAILED = 'FailedOperation.ConfigReleaseQueryFailed'
# The deployment group is running and cannot be started.
FAILEDOPERATION_CONTAINERGROUPGROUPHASRUN = 'FailedOperation.ContainergroupGroupHasrun'
# The deployment group is stopped; this operation cannot be performed.
FAILEDOPERATION_CONTAINERGROUPGROUPHASSTOP = 'FailedOperation.ContainergroupGroupHasstop'
# Health check configuration failed.
FAILEDOPERATION_CVMCAEMASTERHEALTHCHECKCONFIGERROR = 'FailedOperation.CvmCaeMasterHealthCheckConfigError'
# Remote access error: %s.
FAILEDOPERATION_GATEWAYREMOTECALLERROR = 'FailedOperation.GatewayRemoteCallError'
# Deployment groups exist under this namespace.
FAILEDOPERATION_GROUPEXISTS = 'FailedOperation.GroupExists'
# Deployment group query failed.
FAILEDOPERATION_GROUPQUERYFAILD = 'FailedOperation.GroupQueryFaild'
# Machine instance deletion failed.
FAILEDOPERATION_INSTANCEDELETEFAILED = 'FailedOperation.InstanceDeleteFailed'
# Partial failure while querying machine instances.
FAILEDOPERATION_INSTANCEQUERYFAILED = 'FailedOperation.InstanceQueryFailed'
# OS reinstall failed; retry later. If unresolved, contact support or submit a ticket.
FAILEDOPERATION_INSTANCERESETERROR = 'FailedOperation.InstanceResetError'
# OS reinstall request timed out.
FAILEDOPERATION_INSTANCERESETTIMEOUT = 'FailedOperation.InstanceResetTimeout'
# Machine instance update failed.
FAILEDOPERATION_INSTANCEUPDATEFAILED = 'FailedOperation.InstanceUpdateFailed'
# Failed to delete the lane from Consul.
FAILEDOPERATION_LANEINFODELETECONSULFAILED = 'FailedOperation.LaneInfoDeleteConsulFailed'
# Newly associated deployment groups cannot be empty.
FAILEDOPERATION_LANEINFOGROUPNOTEMPTY = 'FailedOperation.LaneInfoGroupNotEmpty'
# Failed to sync the lane to Consul.
FAILEDOPERATION_LANEINFORELEASECONSULFAILED = 'FailedOperation.LaneInfoReleaseConsulFailed'
# Failed to release the lane to the mesh.
FAILEDOPERATION_LANEINFORELEASEMESHFAILED = 'FailedOperation.LaneInfoReleaseMeshFailed'
# Failed to enable the full-chain gray release rule.
FAILEDOPERATION_LANERULEENABLECONSULFAILED = 'FailedOperation.LaneRuleEnableConsulFailed'
# A user may have at most 100 full-chain gray release rules.
FAILEDOPERATION_LANERULEMAXLIMIT = 'FailedOperation.LaneRuleMaxLimit'
# Unable to create the namespace.
FAILEDOPERATION_NAMESPACECREATEFAILED = 'FailedOperation.NamespaceCreateFailed'
# Namespace query failed.
FAILEDOPERATION_NAMESPACEQUERYFAILED = 'FailedOperation.NamespaceQueryFailed'
# Failed to access the configuration center (rate limiting).
FAILEDOPERATION_RATELIMITCONSULERROR = 'FailedOperation.RatelimitConsulError'
# Failed to write the service to the database.
FAILEDOPERATION_SERVICEINSERTFAILED = 'FailedOperation.ServiceInsertFailed'
# Service query failed.
FAILEDOPERATION_SERVICEQUERYFAILED = 'FailedOperation.ServiceQueryFailed'
# Task creation error.
FAILEDOPERATION_TASKCREATEERROR = 'FailedOperation.TaskCreateError'
# Task deletion error.
FAILEDOPERATION_TASKDELETEERROR = 'FailedOperation.TaskDeleteError'
# Task operation failed.
FAILEDOPERATION_TASKOPERATIONFAILED = 'FailedOperation.TaskOperationFailed'
# Task operation forbidden.
FAILEDOPERATION_TASKOPERATIONFORBIDDEN = 'FailedOperation.TaskOperationForbidden'
# Task dispatch error.
FAILEDOPERATION_TASKPUSHERROR = 'FailedOperation.TaskPushError'
# Task query error.
FAILEDOPERATION_TASKQUERYERROR = 'FailedOperation.TaskQueryError'
# Failed to stop the task.
FAILEDOPERATION_TASKTERMINATEFAILED = 'FailedOperation.TaskTerminateFailed'
# Task update error.
FAILEDOPERATION_TASKUPDATEERROR = 'FailedOperation.TaskUpdateError'
# TKE cluster creation failed: %s.
FAILEDOPERATION_TKECLUSTERCREATEFAILED = 'FailedOperation.TkeClusterCreateFailed'
# TKE cluster query failed.
FAILEDOPERATION_TKECLUSTERQUERYFAILED = 'FailedOperation.TkeClusterQueryFailed'
# TSF APM business log config parse-rule query failed.
FAILEDOPERATION_TSFAPMBUSILOGCFGSCHEMAQUERYERROR = 'FailedOperation.TsfApmBusiLogCfgSchemaQueryError'
# TSF APM business log config database write failed.
FAILEDOPERATION_TSFAPMBUSILOGCFGWRITEERROR = 'FailedOperation.TsfApmBusiLogCfgWriteError'
# TSF APM CTSDB client request failed.
FAILEDOPERATION_TSFAPMCTSDBCLIENTREQUESTERROR = 'FailedOperation.TsfApmCtsdbClientRequestError'
# TSF CMonitor CTSDB database request failed.
FAILEDOPERATION_TSFCMONITORCTSDBCLIENTREQUESTFAIL = 'FailedOperation.TsfCmonitorCtsdbClientRequestFail'
# TSF monitor statistics wait timed out; retry later.
FAILEDOPERATION_TSFMONITORWAITEDTIMEOUT = 'FailedOperation.TsfMonitorWaitedTimeout'
# TSF privilege module error; contact the system administrator.
FAILEDOPERATION_TSFPRIVILEGEERROR = 'FailedOperation.TsfPrivilegeError'
# Unhandled module exception.
FAILEDOPERATION_UNHANDLEDEXCEPTION = 'FailedOperation.UnhandledException'
# Application operation request to MASTER FEIGN failed.
INTERNALERROR_APPLICATIONMASTERFEIGNERROR = 'InternalError.ApplicationMasterFeignError'
# Application operation via MASTER failed.
INTERNALERROR_APPLICATIONMASTERNUKNOWNERROR = 'InternalError.ApplicationMasterNuknownError'
# Repository request to delete the application package failed.
INTERNALERROR_APPLICATIONREPODELETEPKG = 'InternalError.ApplicationRepoDeletePkg'
# tsf-scalable init request failed while creating the application.
INTERNALERROR_APPLICATIONSCALABLEINITERROR = 'InternalError.ApplicationScalableInitError'
# Cloud API request for temporary role credentials failed.
INTERNALERROR_CAMROLEREQUESTERROR = 'InternalError.CamRoleRequestError'
# Config release failed: cannot connect to the Consul server.
INTERNALERROR_CANNOTCONNCONSULSERVER = 'InternalError.CanNotConnConsulServer'
# TSF cloud API request failed.
INTERNALERROR_CLOUDAPIPROXYERROR = 'InternalError.CloudApiProxyError'
# Generic cluster error.
INTERNALERROR_CLUSTERCOMMONERROR = 'InternalError.ClusterCommonError'
# VM cluster request to MASTER FEIGN failed.
INTERNALERROR_CLUSTERMASTERFEIGNERROR = 'InternalError.ClusterMasterFeignError'
# Deployment group not found, or insufficient permission on the cluster/namespace/application.
INTERNALERROR_CLUSTERNOTEXISTORPRIVILEGEERROR = 'InternalError.ClusterNotExistOrPrivilegeError'
# Config release failed: Consul server processing failed.
INTERNALERROR_CONSULSERVERERROR = 'InternalError.ConsulServerError'
# Failed to invoke the TKE service.
INTERNALERROR_CONTAINERGROUPKUBERNETEAPIINVOKEERROR = 'InternalError.ContainergroupKuberneteApiInvokeError'
# Failed to connect to the TKE service.
INTERNALERROR_CONTAINERGROUPKUBERNETECONNECTERROR = 'InternalError.ContainergroupKuberneteConnectError'
# Kubernetes deployment not found.
INTERNALERROR_CONTAINERGROUPKUBERNETEDEPLOYMENTNOTFOUND = 'InternalError.ContainergroupKuberneteDeploymentNotfound'
# Container application SQL error.
INTERNALERROR_CONTAINERGROUPSQLFAILED = 'InternalError.ContainergroupSqlFailed'
# Container platform cluster unavailable; current state %s.
INTERNALERROR_CPCLUSTERUNAVAILABLE = 'InternalError.CpClusterUnavailable'
# Command dispatch failed.
INTERNALERROR_CVMCAEMASTERDISPATCHERROR = 'InternalError.CvmCaeMasterDispatchError'
# TSF MASTER internal execution error.
INTERNALERROR_CVMCAEMASTERINTERNALERROR = 'InternalError.CvmCaeMasterInternalError'
# MASTER channel query failed.
INTERNALERROR_CVMCAEMASTERNONALIVE = 'InternalError.CvmCaeMasterNonAlive'
# Generic gateway error: %s.
INTERNALERROR_GATEWAYCOMMONERROR = 'InternalError.GatewayCommonError'
# Data consistency error: %s.
INTERNALERROR_GATEWAYCONSISTENCYERROR = 'InternalError.GatewayConsistencyError'
# Configuration center access error.
INTERNALERROR_GATEWAYCONSULERROR = 'InternalError.GatewayConsulError'
# Gateway data error.
INTERNALERROR_GATEWAYDBERROR = 'InternalError.GatewayDbError'
# Generic deployment group error.
INTERNALERROR_GROUPCOMMONERROR = 'InternalError.GroupCommonError'
# Deployment group operation via MASTER failed.
INTERNALERROR_GROUPMASTERNUKNOWNERROR = 'InternalError.GroupMasterNuknownError'
# TCR repository binding failed.
INTERNALERROR_IMAGEREPOTCRBINDERROR = 'InternalError.ImagerepoTcrBindError'
# Generic TSF instance (node) management error.
INTERNALERROR_INSTANCECOMMONERROR = 'InternalError.InstanceCommonError'
# Failed to create the Kubernetes namespace.
INTERNALERROR_KUBERNETESAPICREATENAMESPACESERROR = 'InternalError.KubernetesApiCreateNamespacesError'
# Failed to create the Kubernetes secret.
INTERNALERROR_KUBERNETESAPICREATESECRETERROR = 'InternalError.KubernetesApiCreateSecretError'
# Kubernetes API call failed.
INTERNALERROR_KUBERNETESCALLERROR = 'InternalError.KubernetesCallError'
# Remote service call failed.
INTERNALERROR_REMOTESERVICECALLERROR = 'InternalError.RemoteServiceCallError'
# Repository internal error.
INTERNALERROR_RUNTIMEERROR = 'InternalError.RuntimeError'
# More than 1000 candidates in an IN clause.
INTERNALERROR_SQLTOOMANYINITEM = 'InternalError.SqlTooManyInItem'
# Task internal error.
INTERNALERROR_TASKINTERNALERROR = 'InternalError.TaskInternalError'
# TKE API call failed: %s.
INTERNALERROR_TKEAPIFAILEDOPERATION = 'InternalError.TkeApiFailedOperation'
# TSF APM error handling the business log config / application association (master).
INTERNALERROR_TSFAPMBUSILOGCFGAPPRELATIONMASTERERROR = 'InternalError.TsfApmBusiLogCfgAppRelationMasterError'
# TSF APM call to the tsf-ms module failed.
INTERNALERROR_TSFAPMCALLTSFMSFAILED = 'InternalError.TsfApmCallTsfMsFailed'
# Generic TSF APM error.
INTERNALERROR_TSFAPMCOMMONERROR = 'InternalError.TsfApmCommonError'
# TSF APM ES client response status exception.
INTERNALERROR_TSFAPMESRESPONSESTATUSEXCEPTION = 'InternalError.TsfApmEsResponseStatusException'
# TSF APM internal error; retry later.
INTERNALERROR_TSFAPMINTERNALERROR = 'InternalError.TsfApmInternalError'
# TSF monitor statistics date parsing failed.
INTERNALERROR_TSFMONITORDATEPARSEFAILED = 'InternalError.TsfMonitorDateParseFailed'
# TSF monitor statistics internal error; retry later.
INTERNALERROR_TSFMONITORINTERNALERROR = 'InternalError.TsfMonitorInternalError'
# Unhandled exception in module [%s].
INTERNALERROR_UNHANDLEDEXCEPTION = 'InternalError.UnhandledException'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# Bad request (400 BAD REQUEST) to interface [%s] of module [%s].
INVALIDPARAMETER_BADREQUEST = 'InvalidParameter.BadRequest'
# TSF MASTER instance status abnormal.
INVALIDPARAMETER_CVMCAEMASTERUNKNOWNINSTANCESTATUS = 'InvalidParameter.CvmCaeMasterUnknownInstanceStatus'
# TCR instance or namespace not found.
INVALIDPARAMETER_IMAGEREPOTCRNAMESPACENOTFOUND = 'InvalidParameter.ImagerepoTcrNamespaceNotFound'
# Parameter error (Kubernetes).
INVALIDPARAMETER_KUBERNETESPARAMERROR = 'InvalidParameter.KubernetesParamError'
# A gray release rule is already bound; deletion is not allowed.
INVALIDPARAMETER_LANEINFOALREADYUSED = 'InvalidParameter.LaneInfoAlreadyUsed'
# A lane with the same name already exists.
INVALIDPARAMETER_LANEINFONAMEALREADYUSED = 'InvalidParameter.LaneInfoNameAlreadyUsed'
# Lane name format is invalid.
INVALIDPARAMETER_LANEINFONAMEINVALID = 'InvalidParameter.LaneInfoNameInvalid'
# Lane name cannot be empty.
INVALIDPARAMETER_LANEINFONAMENOTEMPTY = 'InvalidParameter.LaneInfoNameNotEmpty'
# Lane name cannot exceed 60 characters.
INVALIDPARAMETER_LANEINFONAMETOOLONG = 'InvalidParameter.LaneInfoNameTooLong'
# Lane does not exist.
INVALIDPARAMETER_LANEINFONOTEXIST = 'InvalidParameter.LaneInfoNotExist'
# Lane has no entrance application configured.
INVALIDPARAMETER_LANEINFONOTEXISTENTRANCE = 'InvalidParameter.LaneInfoNotExistEntrance'
# Lane remark cannot exceed 200 characters.
INVALIDPARAMETER_LANEINFOREMARKTOOLONG = 'InvalidParameter.LaneInfoRemarkTooLong'
# Lane referenced by the lane rule does not exist.
INVALIDPARAMETER_LANERULEINFONOTEXIST = 'InvalidParameter.LaneRuleInfoNotExist'
# A lane rule with the same name already exists.
INVALIDPARAMETER_LANERULENAMEALREADYUSED = 'InvalidParameter.LaneRuleNameAlreadyUsed'
# Lane rule name format is invalid.
INVALIDPARAMETER_LANERULENAMEINVALID = 'InvalidParameter.LaneRuleNameInvalid'
# Lane rule name cannot be empty.
INVALIDPARAMETER_LANERULENAMENOTEMPTY = 'InvalidParameter.LaneRuleNameNotEmpty'
# Lane rule name cannot exceed 60 characters.
INVALIDPARAMETER_LANERULENAMETOOLONG = 'InvalidParameter.LaneRuleNameTooLong'
# Lane rule does not exist.
INVALIDPARAMETER_LANERULENOTEXIST = 'InvalidParameter.LaneRuleNotExist'
# Lane rule remark cannot exceed 200 characters.
INVALIDPARAMETER_LANERULEREMARKTOOLONG = 'InvalidParameter.LaneRuleRemarkTooLong'
# Lane rule tag name cannot be empty.
INVALIDPARAMETER_LANERULETAGNAMENOTEMPTY = 'InvalidParameter.LaneRuleTagNameNotEmpty'
# Lane rule tag name cannot exceed 32 characters.
INVALIDPARAMETER_LANERULETAGNAMETOOLONG = 'InvalidParameter.LaneRuleTagNameTooLong'
# Lane rule must have at least one tag.
INVALIDPARAMETER_LANERULETAGNOTEMPTY = 'InvalidParameter.LaneRuleTagNotEmpty'
# Lane rule tag value cannot exceed 128 characters.
INVALIDPARAMETER_LANERULETAGVALUETOOLONG = 'InvalidParameter.LaneRuleTagValueTooLong'
# Total lane rule tag values cannot exceed 200 characters.
INVALIDPARAMETER_LANERULETAGVALUETOTALTOOLONG = 'InvalidParameter.LaneRuleTagValueTotalTooLong'
# Package is in use; release it first.
INVALIDPARAMETER_PACKAGEINUSE = 'InvalidParameter.PackageInUse'
# Parameter error.
INVALIDPARAMETER_PARAMERROR = 'InvalidParameter.ParamError'
# Invalid request parameters (repository package).
INVALIDPARAMETER_REPOPACKAGEPARAMERROR = 'InvalidParameter.RepoPackageParamError'
# Repository contains packages; delete them first.
INVALIDPARAMETER_REPOSITORYNOTEMPTY = 'InvalidParameter.RepositoryNotEmpty'
# TSF APM business log application-id parameter error.
INVALIDPARAMETER_TSFAPMBUSILOGCFGAPPPARAMERROR = 'InvalidParameter.TsfApmBusiLogCfgAppParamError'
# TSF APM business log config / application association parameter error.
INVALIDPARAMETER_TSFAPMBUSILOGCFGAPPRELATIONPARAMERROR = 'InvalidParameter.TsfApmBusiLogCfgAppRelationParamError'
# TSF APM business log config cloud-account parameter error.
INVALIDPARAMETER_TSFAPMBUSILOGCFGCLOUDPARAMERROR = 'InvalidParameter.TsfApmBusiLogCfgCloudParamError'
# TSF APM business log config id parameter error.
INVALIDPARAMETER_TSFAPMBUSILOGCFGIDPARAMERROR = 'InvalidParameter.TsfApmBusiLogCfgIdParamError'
# TSF APM business log config limit parameter error.
INVALIDPARAMETER_TSFAPMBUSILOGCFGLIMITPARAMERROR = 'InvalidParameter.TsfApmBusiLogCfgLimitParamError'
# TSF APM business log search request parameter error.
INVALIDPARAMETER_TSFAPMBUSILOGSEARCHREQUESTPARAMERROR = 'InvalidParameter.TsfApmBusiLogSearchRequestParamError'
# TSF APM running-state statistics query request parameter error.
INVALIDPARAMETER_TSFAPMSTATSSEARCHREQUESTPARAMERROR = 'InvalidParameter.TsfApmStatsSearchRequestParamError'
# TSF APM stdout log search request parameter error.
INVALIDPARAMETER_TSFAPMSTDOUTSEARCHREQUESTPARAMERROR = 'InvalidParameter.TsfApmStdoutSearchRequestParamError'
# TSF APM trace search request parameter error.
INVALIDPARAMETER_TSFAPMTRACESEARCHREQUESTPARAMERROR = 'InvalidParameter.TsfApmTraceSearchRequestParamError'
# TSF monitor request parameter [%s] is illegal.
INVALIDPARAMETER_TSFMONITORREQUESTPARAMILLEGAL = 'InvalidParameter.TsfMonitorRequestParamIllegal'
# Batch package deletion exceeds the per-request limit.
INVALIDPARAMETER_UPPERDELETELIMIT = 'InvalidParameter.UpperDeleteLimit'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Invalid microservice type.
INVALIDPARAMETERVALUE_APPLICATIONMICROTYPEINVALID = 'InvalidParameterValue.ApplicationMicroTypeInvalid'
# The application name already exists; choose a different one.
INVALIDPARAMETERVALUE_APPLICATIONNAMEEXIST = 'InvalidParameterValue.ApplicationNameExist'
# 应用名称不能大于60字符。
INVALIDPARAMETERVALUE_APPLICATIONNAMELENGTH = 'InvalidParameterValue.ApplicationNameLength'
# 应用名称不能为空。
INVALIDPARAMETERVALUE_APPLICATIONNAMENULL = 'InvalidParameterValue.ApplicationNameNull'
# 应用名称格式不正确,只能包含小写字母、数字及分隔符("_"、"-"),且不能以分隔符开头或结尾。
INVALIDPARAMETERVALUE_APPLICATIONNAMEREGXINVALID = 'InvalidParameterValue.ApplicationNameRegxInvalid'
# 无法获取应用。
INVALIDPARAMETERVALUE_APPLICATIONNOTEXISTS = 'InvalidParameterValue.ApplicationNotExists'
# 无效的应用分页参数。
INVALIDPARAMETERVALUE_APPLICATIONPAGELIMITINVALID = 'InvalidParameterValue.ApplicationPageLimitInvalid'
# 无效的应用类型。
INVALIDPARAMETERVALUE_APPLICATIONTYPEINVALID = 'InvalidParameterValue.ApplicationTypeInvalid'
# 与同VPC其它集群CIDR冲突。
INVALIDPARAMETERVALUE_CLUSTERCIDRCONFLICT = 'InvalidParameterValue.ClusterCidrConflict'
# 集群命名已存在,请更换其他名称。
INVALIDPARAMETERVALUE_CLUSTERNAMEEXIST = 'InvalidParameterValue.ClusterNameExist'
# 集群命名不能为空。
INVALIDPARAMETERVALUE_CLUSTERNAMEREQUIRED = 'InvalidParameterValue.ClusterNameRequired'
# 创建集群,无效的地域字段。
INVALIDPARAMETERVALUE_CLUSTERREGIONINVALID = 'InvalidParameterValue.ClusterRegionInvalid'
# 非法集群类型。
INVALIDPARAMETERVALUE_CLUSTERTYPEINVALID = 'InvalidParameterValue.ClusterTypeInvalid'
# 创建集群,无效的可用区字段。
INVALIDPARAMETERVALUE_CLUSTERZONEINVALID = 'InvalidParameterValue.ClusterZoneInvalid'
# 配置项已经发布过。
INVALIDPARAMETERVALUE_CONFIGALREADYRELEASED = 'InvalidParameterValue.ConfigAlreadyReleased'
# 配置项已存在。
INVALIDPARAMETERVALUE_CONFIGEXISTS = 'InvalidParameterValue.ConfigExists'
# 配置项和部署组所属应用不一致。
INVALIDPARAMETERVALUE_CONFIGGROUPAPPLICATIONIDNOTMATCH = 'InvalidParameterValue.ConfigGroupApplicationIdNotMatch'
# 配置项名称不合规。
INVALIDPARAMETERVALUE_CONFIGNAMEINVALID = 'InvalidParameterValue.ConfigNameInvalid'
# 无法获取配置项或无权限访问。
INVALIDPARAMETERVALUE_CONFIGNOTEXISTSORPERMISSIONDENIED = 'InvalidParameterValue.ConfigNotExistsOrPermissionDenied'
# 无法获取配置项发布信息。
INVALIDPARAMETERVALUE_CONFIGRELEASENOTEXISTS = 'InvalidParameterValue.ConfigReleaseNotExists'
# 配置格式不符合YAML要求。
INVALIDPARAMETERVALUE_CONFIGVALUEFORMATINVALID = 'InvalidParameterValue.ConfigValueFormatInvalid'
# 配置项值内容大小(%s)超过限制。
INVALIDPARAMETERVALUE_CONFIGVALUETOOLONG = 'InvalidParameterValue.ConfigValueTooLong'
# 配置项版本描述不合规。
INVALIDPARAMETERVALUE_CONFIGVERSIONDESCINVALID = 'InvalidParameterValue.ConfigVersionDescInvalid'
# 配置项版本不合规。
INVALIDPARAMETERVALUE_CONFIGVERSIONINVALID = 'InvalidParameterValue.ConfigVersionInvalid'
# 该镜像被占用中。
INVALIDPARAMETERVALUE_CONTAINERGROUPIMAGETAGISINUSE = 'InvalidParameterValue.ContainerGroupImageTagIsInUse'
# 服务访问方式不能为空。
INVALIDPARAMETERVALUE_CONTAINERGROUPACCESSTYPENULL = 'InvalidParameterValue.ContainergroupAccesstypeNull'
# 所属应用ID不能为空。
INVALIDPARAMETERVALUE_CONTAINERGROUPAPPLICATIONIDNULL = 'InvalidParameterValue.ContainergroupApplicationIdNull'
# 集群 CPU 资源不足。
INVALIDPARAMETERVALUE_CONTAINERGROUPCPULIMITOVER = 'InvalidParameterValue.ContainergroupCpulimitOver'
# 部署组ID不能为空。
INVALIDPARAMETERVALUE_CONTAINERGROUPGROUPIDNULL = 'InvalidParameterValue.ContainergroupGroupidNull'
# 部署组名不能大于60个字符。
INVALIDPARAMETERVALUE_CONTAINERGROUPGROUPNAMELEGNTH = 'InvalidParameterValue.ContainergroupGroupnameLegnth'
# 部署组名不能为空。
INVALIDPARAMETERVALUE_CONTAINERGROUPGROUPNAMENULL = 'InvalidParameterValue.ContainergroupGroupnameNull'
# 部署组名称格式不正确,只能包含小写字母、数字及分隔符("-"),且必须以小写字母开头,数字或小写字母结尾。
INVALIDPARAMETERVALUE_CONTAINERGROUPGROUPNAMEREGEXMATCHFALSE = 'InvalidParameterValue.ContainergroupGroupnameRegexMatchFalse'
# 实例数量不能为空或不合法。
INVALIDPARAMETERVALUE_CONTAINERGROUPINSTANCENUMINVALID = 'InvalidParameterValue.ContainergroupInstanceNumInvalid'
# CPU limit 和 request 不能同时为空。
INVALIDPARAMETERVALUE_CONTAINERGROUPINVALIDCPUINFO = 'InvalidParameterValue.ContainergroupInvalidCpuInfo'
# 内存 limit 和 request 不能同时为空。
INVALIDPARAMETERVALUE_CONTAINERGROUPINVALIDMEMINFO = 'InvalidParameterValue.ContainergroupInvalidMemInfo'
# limit最大数量,默认 20, 最大值 50。
INVALIDPARAMETERVALUE_CONTAINERGROUPLIMITVALUEINVALID = 'InvalidParameterValue.ContainergroupLimitValueInvalid'
# 集群内存资源不足。
INVALIDPARAMETERVALUE_CONTAINERGROUPMEMLIMITOVER = 'InvalidParameterValue.ContainergroupMemlimitOver'
# 主机端口值非法。
INVALIDPARAMETERVALUE_CONTAINERGROUPNODEPORTINVALID = 'InvalidParameterValue.ContainergroupNodePortInvalid'
# 服务端口值非法。
INVALIDPARAMETERVALUE_CONTAINERGROUPPORTINVALID = 'InvalidParameterValue.ContainergroupPortInvalid'
# 服务端口不允许重复映射。
INVALIDPARAMETERVALUE_CONTAINERGROUPPORTSREPEAT = 'InvalidParameterValue.ContainergroupPortsRepeat'
# 协议值非法,限定:TCP/UDP。
INVALIDPARAMETERVALUE_CONTAINERGROUPPROTOCOLINVALID = 'InvalidParameterValue.ContainergroupProtocolInvalid'
# 公网访问方式下,协议需要一致。
INVALIDPARAMETERVALUE_CONTAINERGROUPPROTOCOLMIXERROR = 'InvalidParameterValue.ContainergroupProtocolMixError'
# 协议不能为空。
INVALIDPARAMETERVALUE_CONTAINERGROUPPROTOCOLNULL = 'InvalidParameterValue.ContainergroupProtocolNull'
# 协议端口不能为空。
INVALIDPARAMETERVALUE_CONTAINERGROUPPROTOCOLPORTSNULL = 'InvalidParameterValue.ContainergroupProtocolPortsNull'
# 镜像仓库名与应用名不匹配。
INVALIDPARAMETERVALUE_CONTAINERGROUPREPONAMEINVALID = 'InvalidParameterValue.ContainergroupReponameInvalid'
# agent 容器资源值非法 , %s。
INVALIDPARAMETERVALUE_CONTAINERGROUPRESOURCEAGENTVALUEINVALID = 'InvalidParameterValue.ContainergroupResourceAgentValueInvalid'
# 容器端口不允许重复映射。
INVALIDPARAMETERVALUE_CONTAINERGROUPTARGETPORTSREPEAT = 'InvalidParameterValue.ContainergroupTargetPortsRepeat'
# 容器端口不能为空。
INVALIDPARAMETERVALUE_CONTAINERGROUPTARGETPORTNULL = 'InvalidParameterValue.ContainergroupTargetportNull'
# 更新间隔不能为空或者数值非法。
INVALIDPARAMETERVALUE_CONTAINERGROUPUPDATEIVLINVALID = 'InvalidParameterValue.ContainergroupUpdateivlInvalid'
# updateType参数不合法,值必须为0、1。
INVALIDPARAMETERVALUE_CONTAINERGROUPUPDATETYPEINVALID = 'InvalidParameterValue.ContainergroupUpdatetypeInvalid'
# 找不到业务容器。
INVALIDPARAMETERVALUE_CONTAINERGROUPYAMLUSERCONTAINERNOTFOUND = 'InvalidParameterValue.ContainergroupYamlUserContainerNotFound'
# TSF MASTER 正在执行任务,请等待任务执行完成再下发新任务。
INVALIDPARAMETERVALUE_CVMCAEMASTERAGENTBUSY = 'InvalidParameterValue.CvmCaeMasterAgentBusy'
# 无可用实例。
INVALIDPARAMETERVALUE_CVMCAEMASTERAGENTNOTFOUND = 'InvalidParameterValue.CvmCaeMasterAgentNotFound'
# TSF MASTER 部署组中无云主机。
INVALIDPARAMETERVALUE_CVMCAEMASTERGROUPNOAGENT = 'InvalidParameterValue.CvmCaeMasterGroupNoAgent'
# 部署组不存在。
INVALIDPARAMETERVALUE_DEPLOYGROUPNOTEXISTS = 'InvalidParameterValue.DeployGroupNotExists'
# 文件配置项已经发布。
INVALIDPARAMETERVALUE_FILECONFIGALREADYRELEASED = 'InvalidParameterValue.FileConfigAlreadyReleased'
# 文件配置项已存在。
INVALIDPARAMETERVALUE_FILECONFIGEXISTS = 'InvalidParameterValue.FileConfigExists'
# 配置文件路径重复。
INVALIDPARAMETERVALUE_FILECONFIGEXISTSPATH = 'InvalidParameterValue.FileConfigExistsPath'
# 其他用户已发布此配置文件路径。
INVALIDPARAMETERVALUE_FILECONFIGEXISTSPATHOTHER = 'InvalidParameterValue.FileConfigExistsPathOther'
# 文件配置项文件路径不合规。
INVALIDPARAMETERVALUE_FILECONFIGFILEPATHINVALID = 'InvalidParameterValue.FileConfigFilePathInvalid'
# 文件配置项名称不合规。
INVALIDPARAMETERVALUE_FILECONFIGNAMEINVALID = 'InvalidParameterValue.FileConfigNameInvalid'
# 无法获取文件配置项或无权限访问。
INVALIDPARAMETERVALUE_FILECONFIGNOTEXISTSORPERMISSIONDENIED = 'InvalidParameterValue.FileConfigNotExistsOrPermissionDenied'
# 同一部署组禁止配置文件重复(文件路径+文件名)。
INVALIDPARAMETERVALUE_FILECONFIGPATHEXISTS = 'InvalidParameterValue.FileConfigPathExists'
# 文件配置项版本描述不合规。
INVALIDPARAMETERVALUE_FILECONFIGVERSIONDESCINVALID = 'InvalidParameterValue.FileConfigVersionDescInvalid'
# 请求参数异常:%s。
INVALIDPARAMETERVALUE_GATEWAYPARAMETERERROR = 'InvalidParameterValue.GatewayParameterError'
# 无效请求参数:%s。
INVALIDPARAMETERVALUE_GATEWAYPARAMETERINVALID = 'InvalidParameterValue.GatewayParameterInvalid'
# 全局命名空间已经存在,只能创建一个全局命名空间。
INVALIDPARAMETERVALUE_GLOBALNAMESPACENAMEEXIST = 'InvalidParameterValue.GlobalNamespaceNameExist'
# 部署相关请求参数校验失败。
INVALIDPARAMETERVALUE_GROUPBATCHPARAMETERINVALID = 'InvalidParameterValue.GroupBatchParameterInvalid'
# 部署组的集群未绑定该命名空间。
INVALIDPARAMETERVALUE_GROUPCLUSTERNAMESPACENOTBOUND = 'InvalidParameterValue.GroupClusterNamespaceNotBound'
# 创建分组, 集群类型不匹配。
INVALIDPARAMETERVALUE_GROUPCLUSTERTYPEMISMATCH = 'InvalidParameterValue.GroupClusterTypeMismatch'
# 删除分组,集群类型不匹配。
INVALIDPARAMETERVALUE_GROUPDELETECLUSTERTYPEMISMATCH = 'InvalidParameterValue.GroupDeleteClusterTypeMismatch'
# 部署组ID不能为空。
INVALIDPARAMETERVALUE_GROUPIDNULL = 'InvalidParameterValue.GroupIdNull'
# 部署组名称已存在,请更换其他名称。
INVALIDPARAMETERVALUE_GROUPNAMEEXIST = 'InvalidParameterValue.GroupNameExist'
# 部署组名不能大于50个字符。
INVALIDPARAMETERVALUE_GROUPNAMELENGTH = 'InvalidParameterValue.GroupNameLength'
# 部署组名称格式不正确,只能包含小写字母、数字及分隔符("-"),且必须以小写字母开头,数字或小写字母结尾。
INVALIDPARAMETERVALUE_GROUPNAMEREGXMISMATCH = 'InvalidParameterValue.GroupNameRegxMismatch'
# 无法获取部署组。
INVALIDPARAMETERVALUE_GROUPNOTEXISTS = 'InvalidParameterValue.GroupNotExists'
# 分组无效的分业参数。
INVALIDPARAMETERVALUE_GROUPPAGELIMITINVALID = 'InvalidParameterValue.GroupPageLimitInvalid'
# 无效的部署组状态过滤字段。
INVALIDPARAMETERVALUE_GROUPSTATUSINVALID = 'InvalidParameterValue.GroupStatusInvalid'
# 分组操作,无有效机器。
INVALIDPARAMETERVALUE_GROUPVALIDINSTANCENULL = 'InvalidParameterValue.GroupValidInstanceNull'
# 镜像仓库名不能为空。
INVALIDPARAMETERVALUE_IMAGEREPOREPONAMENULL = 'InvalidParameterValue.ImagerepoRepoNameNull'
# 镜像仓库名不合法,示例:tsf-repo/nginx。
INVALIDPARAMETERVALUE_IMAGEREPOREPONAMEINVALID = 'InvalidParameterValue.ImagerepoReponameInvalid'
# imageTags不能为空。
INVALIDPARAMETERVALUE_IMAGEREPOTAGNAMENULL = 'InvalidParameterValue.ImagerepoTagnameNull'
# 重装系统,无效的镜像id。
INVALIDPARAMETERVALUE_INSTANCEINVALIDIMAGE = 'InvalidParameterValue.InstanceInvalidImage'
# 参数 %s 取值错误。
INVALIDPARAMETERVALUE_INVALIDPARAMETER = 'InvalidParameterValue.InvalidParameter'
# 参数格式异常。
INVALIDPARAMETERVALUE_INVALIDPARAMETERFORMAT = 'InvalidParameterValue.InvalidParameterFormat'
# 已经绑定灰度规则,无法删除。
INVALIDPARAMETERVALUE_LANEINFOALREADYUSED = 'InvalidParameterValue.LaneInfoAlreadyUsed'
# 存在同名的泳道。
INVALIDPARAMETERVALUE_LANEINFONAMEALREADYUSED = 'InvalidParameterValue.LaneInfoNameAlreadyUsed'
# 泳道名称格式有误。
INVALIDPARAMETERVALUE_LANEINFONAMEINVALID = 'InvalidParameterValue.LaneInfoNameInvalid'
# 泳道名称不能为空。
INVALIDPARAMETERVALUE_LANEINFONAMENOTEMPTY = 'InvalidParameterValue.LaneInfoNameNotEmpty'
# 泳道名称不能超过60个字符。
INVALIDPARAMETERVALUE_LANEINFONAMETOOLONG = 'InvalidParameterValue.LaneInfoNameTooLong'
# 泳道不存在。
INVALIDPARAMETERVALUE_LANEINFONOTEXIST = 'InvalidParameterValue.LaneInfoNotExist'
# 泳道没有设置任何入口应用。
INVALIDPARAMETERVALUE_LANEINFONOTEXISTENTRANCE = 'InvalidParameterValue.LaneInfoNotExistEntrance'
# 泳道备注不能超过200个字符。
INVALIDPARAMETERVALUE_LANEINFOREMARKTOOLONG = 'InvalidParameterValue.LaneInfoRemarkTooLong'
# 全链路灰度规则中的泳道不存在。
INVALIDPARAMETERVALUE_LANERULEINFONOTEXIST = 'InvalidParameterValue.LaneRuleInfoNotExist'
# 存在同名的全链路灰度规则。
INVALIDPARAMETERVALUE_LANERULENAMEALREADYUSED = 'InvalidParameterValue.LaneRuleNameAlreadyUsed'
# 全链路灰度规则名称格式有误。
INVALIDPARAMETERVALUE_LANERULENAMEINVALID = 'InvalidParameterValue.LaneRuleNameInvalid'
# 全链路灰度规则名称不能为空。
INVALIDPARAMETERVALUE_LANERULENAMENOTEMPTY = 'InvalidParameterValue.LaneRuleNameNotEmpty'
# 全链路灰度规则名称不能超过60个字符。
INVALIDPARAMETERVALUE_LANERULENAMETOOLONG = 'InvalidParameterValue.LaneRuleNameTooLong'
# 全链路灰度规则不存在。
INVALIDPARAMETERVALUE_LANERULENOTEXIST = 'InvalidParameterValue.LaneRuleNotExist'
# 全链路灰度规则备注不能超过200个字符。
INVALIDPARAMETERVALUE_LANERULEREMARKTOOLONG = 'InvalidParameterValue.LaneRuleRemarkTooLong'
# 全链路灰度规则标签名不能为空。
INVALIDPARAMETERVALUE_LANERULETAGNAMENOTEMPTY = 'InvalidParameterValue.LaneRuleTagNameNotEmpty'
# 全链路灰度规则标签名不能超过32个字符。
INVALIDPARAMETERVALUE_LANERULETAGNAMETOOLONG = 'InvalidParameterValue.LaneRuleTagNameTooLong'
# 全链路灰度规则必须设置至少一个标签。
INVALIDPARAMETERVALUE_LANERULETAGNOTEMPTY = 'InvalidParameterValue.LaneRuleTagNotEmpty'
# 全链路灰度规则标签值不能超过128个字符。
INVALIDPARAMETERVALUE_LANERULETAGVALUETOOLONG = 'InvalidParameterValue.LaneRuleTagValueTooLong'
# 全链路灰度规则总标签值不能超过200个字符。
INVALIDPARAMETERVALUE_LANERULETAGVALUETOTALTOOLONG = 'InvalidParameterValue.LaneRuleTagValueTotalTooLong'
# 集群已关联该命名空间。
INVALIDPARAMETERVALUE_NAMESPACEALREADYBINDCLUSTER = 'InvalidParameterValue.NamespaceAlreadyBindCluster'
# 命名空间描格式不正确。
INVALIDPARAMETERVALUE_NAMESPACEDESCINVALID = 'InvalidParameterValue.NamespaceDescInvalid'
# 命名空间名称已存在,请更换其他名称。
INVALIDPARAMETERVALUE_NAMESPACENAMEEXIST = 'InvalidParameterValue.NamespaceNameExist'
# 命名空间名称格式不正确。
INVALIDPARAMETERVALUE_NAMESPACENAMEINVALID = 'InvalidParameterValue.NamespaceNameInvalid'
# 无法获取命名空间。
INVALIDPARAMETERVALUE_NAMESPACENOTEXISTS = 'InvalidParameterValue.NamespaceNotExists'
# 配置项已经发布,不允许删除。
INVALIDPARAMETERVALUE_RELEASEDCONFIGCANNOTBEDELETED = 'InvalidParameterValue.ReleasedConfigCanNotBeDeleted'
# 无权限操作资源%s。
INVALIDPARAMETERVALUE_RESOURCEPERMISSIONDENIED = 'InvalidParameterValue.ResourcePermissionDenied'
# ResourceType 不支持。
INVALIDPARAMETERVALUE_RESOURCETYPEERROR = 'InvalidParameterValue.ResourceTypeError'
# 服务描述不能大于200字符。
INVALIDPARAMETERVALUE_SERVICEDESCLENGTH = 'InvalidParameterValue.ServiceDescLength'
# 服务名称重复。
INVALIDPARAMETERVALUE_SERVICENAMEREPEATED = 'InvalidParameterValue.ServiceNameRepeated'
# 服务不存在或权限不足。
INVALIDPARAMETERVALUE_SERVICENOTEXISTSORPERMISSIONDENIED = 'InvalidParameterValue.ServiceNotExistsOrPermissionDenied'
# 无效请求参数。
INVALIDPARAMETERVALUE_TASKPARAMETERINVALID = 'InvalidParameterValue.TaskParameterInvalid'
# 仅有停止状态下的部署组才可以不启动。
INVALIDPARAMETERVALUE_WRONGDONTSTARTVALUE = 'InvalidParameterValue.WrongDontStartValue'
# 命名空间数达到上限。
LIMITEXCEEDED_ERRNAMESPACEMAXLIMIT = 'LimitExceeded.ErrNamespaceMaxLimit'
# 仓库达到上限。
LIMITEXCEEDED_ERRREPOMAXLIMIT = 'LimitExceeded.ErrRepoMaxLimit'
# 最多支持创建五个容器集群,当前已经超过使用上限。
LIMITEXCEEDED_TKECLUSTERNUMBEREXCEEDLIMIT = 'LimitExceeded.TkeClusterNumberExceedLimit'
# 应用ID不能为空。
MISSINGPARAMETER_APPLICATIONIDNULL = 'MissingParameter.ApplicationIdNull'
# 应用ID未填写。
MISSINGPARAMETER_APPLICATIONIDREQUIRED = 'MissingParameter.ApplicationIdRequired'
# 应用类型不能为空。
MISSINGPARAMETER_APPLICATIONTYPENULL = 'MissingParameter.ApplicationTypeNull'
# 集群ID未填写。
MISSINGPARAMETER_CLUSTERIDREQUIRED = 'MissingParameter.ClusterIdRequired'
# 集群所属子网不能为空。
MISSINGPARAMETER_CLUSTERSUBNETREQUIRED = 'MissingParameter.ClusterSubnetRequired'
# 配置项ID未填写。
MISSINGPARAMETER_CONFIGIDREQUIRED = 'MissingParameter.ConfigIdRequired'
# 配置项名称未填写。
MISSINGPARAMETER_CONFIGNAMEREQUIRED = 'MissingParameter.ConfigNameRequired'
# 配置项发布信息ID未填写。
MISSINGPARAMETER_CONFIGRELEASEIDREQUIRED = 'MissingParameter.ConfigReleaseIdRequired'
# 配置项类型未填写。
MISSINGPARAMETER_CONFIGTYPEREQUIRED = 'MissingParameter.ConfigTypeRequired'
# 配置项值未填写。
MISSINGPARAMETER_CONFIGVALUEREQUIRED = 'MissingParameter.ConfigValueRequired'
# 配置项版本未填写。
MISSINGPARAMETER_CONFIGVERSIONREQUIRED = 'MissingParameter.ConfigVersionRequired'
# 文件配置项文件内容未填写。
MISSINGPARAMETER_FILECONFIGFILEVALUEREQUIRED = 'MissingParameter.FileConfigFileValueRequired'
# 缺少请求参数:%s。
MISSINGPARAMETER_GATEWAYPARAMETERREQUIRED = 'MissingParameter.GatewayParameterRequired'
# 分组所属应用不能为空。
MISSINGPARAMETER_GROUPAPPLICATIONNULL = 'MissingParameter.GroupApplicationNull'
# 分组ID不能为空。
MISSINGPARAMETER_GROUPIDNULL = 'MissingParameter.GroupIdNull'
# 分组所属命名空间不能为空。
MISSINGPARAMETER_GROUPNAMESPACENULL = 'MissingParameter.GroupNamespaceNull'
# 虚拟机集群导入云主机导入方式为空。
MISSINGPARAMETER_INSTANCEIMPORTMODENULL = 'MissingParameter.InstanceImportModeNull'
# 命名空间ID不能为空。
MISSINGPARAMETER_NAMESPACEIDREQUIRED = 'MissingParameter.NamespaceIdRequired'
# %s缺失。
MISSINGPARAMETER_REQUIREDPARAMETERMISSING = 'MissingParameter.RequiredParameterMissing'
# 未填写服务Id。
MISSINGPARAMETER_SERVICEIDREQUIRED = 'MissingParameter.ServiceIdRequired'
# 缺少必填参数。
MISSINGPARAMETER_TASKPARAMETERMISSED = 'MissingParameter.TaskParameterMissed'
# 此应用下存在资源,无法执行删除操作。
RESOURCEINUSE_APPLICATIONCANNOTDELETE = 'ResourceInUse.ApplicationCannotDelete'
# 资源仍在使用中 无法删除。
RESOURCEINUSE_CVMCAEMASTERCANNOTDELETE = 'ResourceInUse.CvmcaeMasterCannotDelete'
# 默认命名空间不能被删除。
RESOURCEINUSE_DEFAULTNAMEPSACECANNOTBEDELETED = 'ResourceInUse.DefaultNamepsaceCannotBeDeleted'
# 此分组下存在资源,无法执行删除操作。
RESOURCEINUSE_GROUPCANNOTDELETE = 'ResourceInUse.GroupCannotDelete'
# 部署组在更新中 请稍后再执行该操作。
RESOURCEINUSE_GROUPINOPERATION = 'ResourceInUse.GroupInOperation'
# 机器实例已经被使用。
RESOURCEINUSE_INSTANCEHASBEENUSED = 'ResourceInUse.InstanceHasBeenUsed'
# 此命名空间下存在资源,无法执行删除操作。
RESOURCEINUSE_NAMESPACECANNOTDELETE = 'ResourceInUse.NamespaceCannotDelete'
# 资源对象已存在。
RESOURCEINUSE_OBJECTEXIST = 'ResourceInUse.ObjectExist'
# 限流规则已存在,请检查规则名和规则配置。
RESOURCEINUSE_RATELIMITRULEEXISTERROR = 'ResourceInUse.RatelimitRuleExistError'
# 仓库空间达到上限。
RESOURCEINSUFFICIENT_PACKAGESPACEFULL = 'ResourceInsufficient.PackageSpaceFull'
# 无法获取应用信息。
RESOURCENOTFOUND_APPLICATIONNOTEXIST = 'ResourceNotFound.ApplicationNotExist'
# 无法获取应用或应用不属于当前项目。
RESOURCENOTFOUND_APPLICATIONPROJECTNOTMATCH = 'ResourceNotFound.ApplicationProjectNotMatch'
# 无法获取命名空间所属集群。
RESOURCENOTFOUND_CLUSTERNOTEXIST = 'ResourceNotFound.ClusterNotExist'
# 集群所属私有网络不存在。
RESOURCENOTFOUND_CLUSTERVPCNOTEXIST = 'ResourceNotFound.ClusterVpcNotExist'
# 找不到集群。
RESOURCENOTFOUND_CONTAINERGROUPCLUSTERNOTFOUND = 'ResourceNotFound.ContainergroupClusterNotfound'
# 无法找到该部署组所属集群和命名空间。
RESOURCENOTFOUND_CONTAINERGROUPGROUPNAMESPACECLUSTERNOTFOUND = 'ResourceNotFound.ContainergroupGroupNamespaceClusterNotFound'
# 无法找到该部署组。
RESOURCENOTFOUND_CONTAINERGROUPGROUPNOTFOUND = 'ResourceNotFound.ContainergroupGroupNotFound'
# TSF MASTER 资源不存在。
RESOURCENOTFOUND_CVMCAEMASTERRESOURCENOTFOUND = 'ResourceNotFound.CvmcaeMasterResourceNotFound'
# 镜像仓库不存在。
RESOURCENOTFOUND_ERRNOREPO = 'ResourceNotFound.ErrNoRepo'
# 用户错误。
RESOURCENOTFOUND_ERRNOUSER = 'ResourceNotFound.ErrNoUser'
# 无法获取分组所属应用。
RESOURCENOTFOUND_GROUPAPPLICATIONNOTEXIST = 'ResourceNotFound.GroupApplicationNotExist'
# 无法获取分组所属命名空间。
RESOURCENOTFOUND_GROUPNAMESPACENOTEXIST = 'ResourceNotFound.GroupNamespaceNotExist'
# 此部署组不存在,无法执行该操作。
RESOURCENOTFOUND_GROUPNOTEXIST = 'ResourceNotFound.GroupNotExist'
# 无法获取机器信息。
RESOURCENOTFOUND_INSTANCENOTEXIST = 'ResourceNotFound.InstanceNotExist'
# [%s]模块未提供该接口[%s]。。
RESOURCENOTFOUND_INTERFACENOTFOUND = 'ResourceNotFound.InterfaceNotFound'
# 无法找到License服务器。
RESOURCENOTFOUND_LICENSESERVERNOTFOUND = 'ResourceNotFound.LicenseServerNotFound'
# 目标微服务已离线[%s]。。
RESOURCENOTFOUND_MICROSERVICEOFFLINE = 'ResourceNotFound.MicroserviceOffline'
# 无法获取命名空间。
RESOURCENOTFOUND_NAMESPACENOTEXIST = 'ResourceNotFound.NamespaceNotExist'
# 资源对象不存在。
RESOURCENOTFOUND_OBJECTNOEXIST = 'ResourceNotFound.ObjectNoExist'
# 无法获取服务,无法执行该操作。
RESOURCENOTFOUND_SERVICENOTEXIST = 'ResourceNotFound.ServiceNotExist'
# 任务不存在。
RESOURCENOTFOUND_TASKNOTFOUND = 'ResourceNotFound.TaskNotFound'
# TKE 中不存在该集群。
RESOURCENOTFOUND_TKECLUSTERNOTEXISTS = 'ResourceNotFound.TkeClusterNotExists'
# 访问 CAM 系统出错,%s。
UNAUTHORIZEDOPERATION_CAMGENERALERROR = 'UnauthorizedOperation.CamGeneralError'
# 协作者身份未授权,需要主账号授予协作者权限,参考 TSF 官网文档「快速入门/准备工作」。
UNAUTHORIZEDOPERATION_CAMTSFROLENOPERMISSION = 'UnauthorizedOperation.CamTsfRoleNoPermission'
# 当前主账号未创建TSF_QCSRole或未对子账号授予预设策略QcloudCamSubaccountsAuthorizeRoleFullAccess。请参考产品文档主账号协作者使用说明。。
UNAUTHORIZEDOPERATION_CAMTSFROLENOTEXIST = 'UnauthorizedOperation.CamTsfRoleNotExist'
# License未激活。。
UNAUTHORIZEDOPERATION_LICENSEINACTIVE = 'UnauthorizedOperation.LicenseInactive'
# 您所购买的服务不支持该操作。
UNAUTHORIZEDOPERATION_LICENSEUNAUTHORIZED = 'UnauthorizedOperation.LicenseUnauthorized'
# 缺少License。。
UNAUTHORIZEDOPERATION_NOLICENSE = 'UnauthorizedOperation.NoLicense'
# 用户无权限访问该接口。。
UNAUTHORIZEDOPERATION_NOPRIVILEGE = 'UnauthorizedOperation.NoPrivilege'
# 批量操作数量超过限制:%s。
UNSUPPORTEDOPERATION_GATEWAYTOOMANYREQUESTPARAMETER = 'UnsupportedOperation.GatewayTooManyRequestParameter'
# 操作不支持。
UNSUPPORTEDOPERATION_TASKNOTSUPPORTED = 'UnsupportedOperation.TaskNotSupported'
# 不支持的ACTION。。
UNSUPPORTEDOPERATION_UNSUPPORTACTION = 'UnsupportedOperation.UnsupportAction'
| {
"content_hash": "5759d6aa62fceb1fb32cf07aa5cf9743",
"timestamp": "",
"source": "github",
"line_count": 952,
"max_line_length": 127,
"avg_line_length": 34.938025210084035,
"alnum_prop": 0.8694867863263281,
"repo_name": "tzpBingo/github-trending",
"id": "f719460adcc1f84386a49deff17300abbff5cdcf",
"size": "41296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tencentcloud/tsf/v20180326/errorcodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
} |
"""Cloud Functions do not exceed memory limits.
Log entries indicating Cloud Functions exceeding memory limits have been found.
"""
from boltons.iterutils import get_path
from gcpdiag import lint, models
from gcpdiag.queries import gcf, logs
MATCH_STR = 'Error: memory limit exceeded.'
logs_by_project = {}
LOG_FILTER = ['severity=ERROR', f'textPayload:"{MATCH_STR}"']
def prepare_rule(context: models.Context):
  """Queue the Cloud Functions error-log query for this project.

  The query result is stored in the module-level logs_by_project map so
  run_rule can consume it once all prefetches have completed.
  """
  project = context.project_id
  combined_filter = ' AND '.join(LOG_FILTER)
  query = logs.query(
      project_id=project,
      resource_type='cloud_function',
      log_name='log_id("cloudfunctions.googleapis.com/cloud-functions")',
      filter_str=combined_filter)
  logs_by_project[project] = query
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
  """Report Cloud Functions whose logs show 'memory limit exceeded' errors.

  Scans the prefetched log entries for ERROR-severity payloads containing
  MATCH_STR, collects the affected function names, then marks each known
  function as failed or ok accordingly.
  """
  functions = gcf.get_cloudfunctions(context)
  if not functions:
    report.add_skipped(None, 'no functions found')
    return

  exceeded = set()
  for entry in logs_by_project[context.project_id].entries:
    severity = get_path(entry, 'severity', default='')
    payload = get_path(entry, 'textPayload', default='')
    # Only ERROR entries whose payload carries the memory-limit message count.
    if severity != 'ERROR' or MATCH_STR not in payload:
      continue
    name = get_path(entry, ('resource', 'labels', 'function_name'), default='')
    if name:
      exceeded.add(name)

  for _, function in sorted(functions.items()):
    if function.name in exceeded:
      available_memory = function.memory
      report.add_failed(
          function,
          f'{function.name} exceeded {available_memory} MB memory limit')
    else:
      report.add_ok(function)
| {
"content_hash": "9d099f933ae5d05a90577c1d2416967c",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 80,
"avg_line_length": 33.32,
"alnum_prop": 0.6872749099639855,
"repo_name": "GoogleCloudPlatform/gcpdiag",
"id": "681fc16b60684cdebba495ce6a2b9ac79945a1ec",
"size": "2261",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gcpdiag/lint/gcf/err_2022_003_cloudfunctions_memory_limit_exceeded.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4610"
},
{
"name": "HCL",
"bytes": "90111"
},
{
"name": "HTML",
"bytes": "8149"
},
{
"name": "Jinja",
"bytes": "1231"
},
{
"name": "Makefile",
"bytes": "51860"
},
{
"name": "Python",
"bytes": "792739"
},
{
"name": "SCSS",
"bytes": "1435"
},
{
"name": "Shell",
"bytes": "10973"
},
{
"name": "Smarty",
"bytes": "726"
}
],
"symlink_target": ""
} |
import json
import os
import rethinkdb
from flask import Flask, Response, request
import hn
LIMIT = 700
app = Flask(__name__, static_url_path='', static_folder='public')
app.add_url_rule('/', 'root', lambda: app.send_static_file('./index.html'))
@app.route('/hnjobs', methods=['GET'])
def comments_handler():
    """Return HN job comments as JSON, filtered by nav mode and search text.

    navmode: 1 = cool (cool > 1), 2 = uncool (cool == 0),
    3 = potential remote (cool == 1 and text mentions 'remote'),
    anything else = potential (cool == 1).
    An optional 'filter' query parameter narrows results by regex match.
    """
    cursor = app.table.filter({'parent': app.main_id, 'type': 'comment'})
    navmode = request.args.get('navmode', '0')
    if navmode == '1':
        # cool entries
        cursor = cursor.filter(rethinkdb.row['cool'].gt(1))
    elif navmode == '2':
        # uncool entries
        cursor = cursor.filter({'cool': 0})
    elif navmode == '3':
        # potential remote entries
        cursor = cursor.filter({'cool': 1}).filter(
            rethinkdb.row['text'].match('(?i)remote'))
    else:
        # potential entries
        cursor = cursor.filter({'cool': 1})
    pattern = request.args.get('filter', None)
    if pattern:
        # case-insensitive free-text match over the comment body
        cursor = cursor.filter(rethinkdb.row['text'].match(r'(?i){}'.format(pattern)))
    rows = list(cursor.limit(LIMIT).run(app.connection))
    return Response(json.dumps(rows), mimetype='application/json',
                    headers={'Cache-Control': 'no-cache'})
@app.route('/hnjobs/latest', methods=['GET'])
def latest_handler():
    """Summarize the current thread: URL, dates and classification counters.

    Counters are grouped by the 'cool' field: 2 = liked, 1 = unclassified,
    0 = disliked.
    """
    counters = app.table.group('cool').count().run(app.connection)
    thread = app.table.get(app.main_id).run(app.connection)
    newest = app.table.filter({'parent': app.main_id}).max('time').run(app.connection)
    summary = {
        'url': 'https://news.ycombinator.com/item?id={}'.format(app.main_id),
        'date': thread['time'],
        'latest': newest['time'],
        'liked': counters[2],
        'disliked': counters[0],
        'unclassified': counters[1],
        'total': sum(counters.values()),
    }
    return Response(json.dumps(summary), mimetype='application/json',
                    headers={'Cache-Control': 'no-cache'})
@app.route('/hnjobs/update/<jobid>', methods=['DELETE', 'POST'])
def hide_a_job_handler(jobid):
    """Hide (DELETE) or upvote (POST) a single job comment by id.

    Returns JSON {'success': bool}. success is False when the id is not an
    integer or when the update was skipped (row not found).

    Bug fix: the original swallowed ValueError from int(jobid) and then
    returned json.dumps(rtv) with rtv never assigned, raising
    UnboundLocalError (a 500) for any non-numeric id.
    """
    success = False
    try:
        job_key = int(jobid)
    except ValueError:
        # Non-numeric id: report failure instead of crashing.
        job_key = None
    if job_key is not None:
        if request.method == 'DELETE':
            result = app.table.get(job_key).update({'cool': 0}).run(app.connection)
            success = result.get('skipped', 1) == 0
            app.logger.debug('Hide {} {}'.format(job_key, result))
        else:  # POST: bump the 'cool' counter by one
            result = app.table.get(job_key).update(
                {'cool': rethinkdb.row['cool'] + 1}).run(app.connection)
            success = result.get('skipped', 1) == 0
            app.logger.debug('Updated {} {}'.format(job_key, result))
    return Response(json.dumps({'success': success}),
                    mimetype='application/json',
                    headers={'Cache-Control': 'no-cache'})
def main():
    """Wire up the RethinkDB connection and start the Flask dev server."""
    app.connection, app.table, app.main_id = hn.get_connection()
    port = int(os.environ.get('PORT', 3000))
    app.run(port=port, debug=True)


if __name__ == '__main__':
    main()
| {
"content_hash": "b8cdd9d99222459a46fd19e058756112",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 105,
"avg_line_length": 39.306666666666665,
"alnum_prop": 0.5739484396200815,
"repo_name": "babo/hnjobs",
"id": "1a98b2f0028ccac4b2294a97a6911ca3c82294d4",
"size": "3570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "166"
},
{
"name": "Dart",
"bytes": "152481"
},
{
"name": "HTML",
"bytes": "1011"
},
{
"name": "JavaScript",
"bytes": "664502"
},
{
"name": "Python",
"bytes": "13774"
},
{
"name": "Shell",
"bytes": "1374"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2017 Scaleway. All rights reserved.
Use of this source code is governed by a MIT-style
license that can be found in the LICENSE file.
'''
from .tooling import yaml_params
@yaml_params
def test_system_state(host, params):
    """
    Tests the system state exported by systemd.

    Example:
      >> - system_state: running
    """
    possible_states = (
        'initializing',
        'starting',
        'running',
        'degraded',
        'maintenance',
        'stopping',
        'offline',
        'unknown'
    )
    if params not in possible_states:
        # Bug fix: str.join is a method of the separator, not the tuple;
        # the original possible_states.join(", ") raised AttributeError
        # instead of this intended RuntimeError.
        raise RuntimeError(
            ('expected system state should be a string among {}, '
             'got ({})').format(", ".join(possible_states), params))

    res = host.run('systemctl is-system-running')
    assert not res.stderr
    assert res.stdout.strip() == params  # params is the expected state
| {
"content_hash": "893419689e6d97b6a20c70f7229708bb",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 70,
"avg_line_length": 23.605263157894736,
"alnum_prop": 0.6042363433667781,
"repo_name": "scaleway/tim",
"id": "076270d92aeddf948f674b6985a4a1d13518336b",
"size": "897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testinfra_tim/test_system_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18639"
},
{
"name": "Shell",
"bytes": "14"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.core.urlresolvers import reverse
from game.forms import GameForm
from game.models import Game, User, Coordinate
from django.http import HttpResponseRedirect, HttpResponse
from django.utils import simplejson as json
from game.functions import reveal, create_revealed_matrix, set_flag_func, check_multiple_func, update_coordinates, reveal_mines, mine_exists, create_high_scores
from itertools import chain
from django.utils.datastructures import SortedDict
def index(request):
    """Landing page: new-game form plus per-difficulty high-score tables.

    On a valid POST, creates the user (if new) and a fresh game with a
    board sized by the chosen difficulty, then redirects to the game page.
    """
    if request.method == 'POST':
        post = request.POST
        form = GameForm(post)
        if form.is_valid():
            # create the user if they don't exist already
            existing = User.objects.filter(name=post['name'])[:1]
            if not existing:
                User(name=post['name']).save()
            try:
                user = User.objects.get(name=post['name'])
            except User.DoesNotExist:
                return HttpResponse('database error', status=404)
            # build the game (board size depends on difficulty), then persist
            difficulty = post['difficulty']
            if difficulty == 'beginner':
                game = Game(width=9, height=9, number_of_mines=10,
                            difficulty='beginner', user=user)
            elif difficulty == 'intermediate':
                game = Game(width=16, height=16, number_of_mines=40,
                            difficulty='intermediate', user=user)
            elif difficulty == 'expert':
                game = Game(width=30, height=16, number_of_mines=99,
                            difficulty='expert', user=user)
            game.create_minefield()
            game.fields_left = game.width * game.height
            game.save()
            # redirect to the game page
            url_kwargs = {'name': user.name, 'game_id': str(game.id)}
            return HttpResponseRedirect(reverse('game_start', kwargs=url_kwargs))
    else:
        form = GameForm()

    # high-score tables, one per difficulty
    beginner_dict = create_high_scores(
        User.objects.filter(game__difficulty='beginner',
                            game__won=True).order_by('game__won'))
    inter_dict = create_high_scores(
        User.objects.filter(game__difficulty='intermediate', game__won=True))
    expert_dict = create_high_scores(
        User.objects.filter(game__difficulty='expert', game__won=True))

    return render(request, 'index.html', {
        'form': form,
        'beginner_dict': beginner_dict,
        'inter_dict': inter_dict,
        'expert_dict': expert_dict
    })
def game_start(request, name, game_id):
    """Load a game by id, stash its state in the session, render the board.

    Finished games (won or lost) redirect back to the index page.
    """
    # get the current game from the passed in game id
    try:
        game = Game.objects.get(id=game_id)
    except Game.DoesNotExist:
        return HttpResponse('database error', status=404)

    game_number = Game.objects.filter(user__name=name).count()
    if game.won or game.lost:
        return HttpResponseRedirect(reverse('index'))

    # save game info as session data for easy retrieval
    session_data = {
        'name': name,
        'game_id': game_id,
        'mines': game.number_of_mines,
        'difficulty': game.difficulty,
        'height': game.height,
        'width': game.width,
        'mine_field': game.get_minefield_array(),
        'revealed_matrix': create_revealed_matrix(game.height, game.width),
        'fields_left': game.fields_left,
        'game_number': game_number,
        'temp_coords': [],
        'won': False,
        'lost': False,
    }
    request.session['game_data'] = session_data
    return render(request, 'game.html', session_data)
def game_check(request, name, game_id):
    """AJAX endpoint: apply a player's move to the session-held game state.

    Reads coordinates and mode flags from the GET parameters, mutates the
    ``game_data`` dict stored in the session, persists progress on the
    ``Game`` row and returns the updated state as JSON.
    """
    try:
        game = Game.objects.get(id=game_id)
    except Game.DoesNotExist:
        return HttpResponse('database error', status=404)
    # get the current game data from session
    game_data = request.session['game_data']
    # get info from the get requests (if applicable)
    # NOTE(review): x/y stay unbound when the parameters are absent; the
    # client presumably always sends them for the branches that use them.
    if request.GET.get('x') != None: x = int(request.GET.get('x'))
    if request.GET.get('y') != None: y = int(request.GET.get('y'))
    # NOTE(review): bool('false') is True -- any non-empty value enables
    # these flags; the client presumably omits a parameter to disable it.
    set_flag = bool(request.GET.get('setFlag'))
    check_multiple = bool(request.GET.get('checkMultiple'))
    reload_data = bool(request.GET.get('reloadData'))
    if reload_data:
        # Rebuild the revealed matrix from the coordinates stored in the DB.
        coordinates = Coordinate.objects.filter(game=game)
        # BUGFIX: was ``coordinates.count > 0`` which compared the bound
        # method itself (always truthy) instead of calling it.
        if coordinates.count() > 0:
            for coord in coordinates:
                game_data['revealed_matrix'][coord.x][coord.y]['attr'] = coord.attr
        game_data['fields_left'] = game.fields_left
    else:
        # if the user wants to set a flag, only do this
        if set_flag:
            set_flag_func(x, y, game_data)
        # if the player is checking multiple fields at once (chording)
        elif check_multiple:
            if check_multiple_func(x, y, game_data):
                game_data['lost'] = True
                game.lost = True
            # BUGFIX: removed a leftover debug ``print "hello"`` statement.
        # check ONE field (the one the player has clicked)
        else:
            if mine_exists(x, y, game_data):
                reveal_mines(game_data)
                game_data['revealed_matrix'][x][y]['attr'] = 'mine'
                game_data['lost'] = True
                game.lost = True
            else:
                reveal(x, y, game_data)
        # The game is won once only the mines remain hidden.
        if game_data['fields_left'] == game_data['mines']:
            game_data['won'] = True
            game.won = True
        # save the fields left on the database in case the game gets reloaded
        game.fields_left = game_data['fields_left']
        game.save()
        # update the coordinates in the database under one save operation
        update_coordinates(game_data)
        game_data['temp_coords'] = []
    # save the data back onto the session
    request.session['game_data'] = game_data
    # NOTE: ``mimetype`` is the pre-Django-1.7 spelling of ``content_type``.
    return HttpResponse(json.dumps(game_data), mimetype='application/json')
"content_hash": "6f905458ce0e5df8595a5f00fa861492",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 160,
"avg_line_length": 35.62091503267974,
"alnum_prop": 0.6706422018348623,
"repo_name": "ZzCalvinzZ/minesweeper",
"id": "b8b6ccd03de8a6545bab6362cd4a4c89e9272689",
"size": "5450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1995"
},
{
"name": "HTML",
"bytes": "2699"
},
{
"name": "JavaScript",
"bytes": "4497"
},
{
"name": "Python",
"bytes": "21612"
}
],
"symlink_target": ""
} |
__author__ = 'Robert Meyer'
import numpy as np
import sys
if (sys.version_info < (2, 7, 0)):
import unittest2 as unittest
else:
import unittest
from pypet.parameter import Parameter, PickleParameter, ArrayParameter,\
SparseParameter, ObjectTable, Result, SparseResult, PickleResult, BaseParameter
from pypet.trajectory import Trajectory
import pickle
import scipy.sparse as spsp
import pypet.pypetexceptions as pex
import warnings
import pandas as pd
import pypet.utils.comparisons as comp
from pypet.utils.helpful_classes import ChainMap
from pypet.utils.explore import cartesian_product
import pypet.compat as compat
import pypet.pypetconstants as pypetconstants
from pypet.tests.testutils.ioutils import run_suite, parse_args, make_temp_dir
from pypet.tests.testutils.data import TrajectoryComparator
class ParameterTest(TrajectoryComparator):
    """Test battery for the plain ``Parameter`` class.

    Subclasses reuse the whole battery for other parameter types by
    overriding ``make_params``/``explore`` and pre-seeding ``self.data``
    before delegating to this ``setUp``.
    """

    tags = 'unittest', 'parameter'

    def test_type_error_for_not_supported_data(self):
        """Comparison helpers must reject data types a parameter cannot hold."""
        for param in self.param.values():
            if not isinstance(param, PickleParameter):
                with self.assertRaises(TypeError):
                    param._values_of_same_type(ChainMap(), ChainMap())
                with self.assertRaises(TypeError):
                    param._equal_values(ChainMap(), ChainMap())

    def test_store_load_with_hdf5(self):
        """Round-trip all parameters through an HDF5 trajectory file."""
        traj_name = 'test_%s' % self.__class__.__name__
        filename = make_temp_dir(traj_name + '.hdf5')
        traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                          filename=filename, overwrite_file=True)
        for param in self.param.values():
            traj.f_add_parameter(param)
        traj.f_store()
        new_traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                              filename=filename)
        new_traj.f_load(load_data=2)
        self.compare_trajectories(traj, new_traj)

    def test_store_load_with_hdf5_no_data(self):
        """Round-trip parameters whose data was wiped before storing."""
        traj_name = 'test_%s' % self.__class__.__name__
        filename = make_temp_dir(traj_name + 'nodata.hdf5')
        traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                          filename=filename, overwrite_file=True)
        for param in self.param.values():
            param._data = None
            traj.f_add_parameter(param)
        traj.f_store()
        new_traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                              filename=filename)
        new_traj.f_load(load_data=2)
        self.compare_trajectories(traj, new_traj)

    def test_type_error_for_exploring_if_range_does_not_match(self):
        """Exploring with mistyped or empty value lists must fail loudly."""
        param = self.param['val1']
        with self.assertRaises(TypeError):
            param._explore(['a', 'b'])
        with self.assertRaises(TypeError):
            param._explore([ChainMap(), ChainMap()])
        with self.assertRaises(ValueError):
            param._explore([])

    def test_cannot_expand_and_not_explore_throwing_type_error(self):
        """Expanding needs an existing range; exploring twice is forbidden."""
        for param in self.param.values():
            if not param.f_has_range():
                with self.assertRaises(TypeError):
                    param._expand([12, 33])
            else:
                with self.assertRaises(TypeError):
                    param._explore([12, 33])

    def test_equal_values(self):
        """``_equal_values`` agrees with itself and rejects foreign values."""
        for param in self.param.values():
            self.assertTrue(param._equal_values(param.f_get(), param.f_get()))
            self.assertFalse(param._equal_values(param.f_get(), 23432432432))
            self.assertFalse(param._equal_values(param.f_get(), ChainMap()))
            if not isinstance(param, PickleParameter):
                with self.assertRaises(TypeError):
                    self.assertFalse(param._equal_values(ChainMap(), ChainMap()))

    def test_parameter_locking(self):
        """Explored parameters are locked and locked ones reject mutation."""
        for param in self.param.values():
            if not param.v_explored:
                self.assertFalse(param.v_locked, 'Param %s is locked' % param.v_full_name)
                param.f_lock()
            else:
                # BUGFIX: the failure message used to read 'is locked' which
                # is the opposite of what a failure here means.
                self.assertTrue(param.v_locked, 'Param %s is not locked' % param.v_full_name)
            with self.assertRaises(pex.ParameterLockedException):
                param.f_set(3)
            with self.assertRaises(pex.ParameterLockedException):
                param._explore([3])
            with self.assertRaises(pex.ParameterLockedException):
                param._expand([3])
            with self.assertRaises(pex.ParameterLockedException):
                param._shrink()
            with self.assertRaises(pex.ParameterLockedException):
                param.f_empty()

    def test_param_accepts_not_unsupported_data(self):
        """``f_set`` must reject data types the parameter cannot hold."""
        for param in self.param.values():
            if not isinstance(param, PickleParameter):
                with self.assertRaises(TypeError):
                    param.f_set(ChainMap())

    def test_parameter_access_throws_ValueError(self):
        """Out-of-range run indices must raise a ValueError."""
        for name, param in self.param.items():
            if name in self.explore_dict:
                self.assertTrue(param.f_has_range())
                with self.assertRaises(ValueError):
                    param._set_parameter_access(1232121321321)

    def test_values_of_same_type(self):
        """``_values_of_same_type`` distinguishes matching from foreign types."""
        for param in self.param.values():
            self.assertTrue(param._values_of_same_type(param.f_get(), param.f_get()))
            if not isinstance(param.f_get(), int):
                self.assertFalse(param._values_of_same_type(param.f_get(), 23432432432))
            self.assertFalse(param._values_of_same_type(param.f_get(), ChainMap()))
            if not isinstance(param, PickleParameter):
                with self.assertRaises(TypeError):
                    self.assertFalse(param._equal_values(ChainMap(), ChainMap()))

    def test_meta_settings(self):
        """Full name, short name and location are consistent for every param."""
        for key, param in self.param.items():
            self.assertEqual(param.v_full_name, self.location + '.' + key)
            self.assertEqual(param.v_name, key)
            self.assertEqual(param.v_location, self.location)

    def make_params(self):
        """Build one plain ``Parameter`` per entry in ``self.data``."""
        self.param = {}
        for key, val in self.data.items():
            self.param[key] = Parameter(self.location + '.' + key, val, comment=key)

    def setUp(self):
        """Populate test data, build the parameters and explore a subset."""
        if not hasattr(self, 'data'):
            self.data = {}
        self.data['val0'] = 1
        self.data['val1'] = 1.0
        self.data['val2'] = True
        self.data['val3'] = 'String'
        self.data['npfloat'] = np.array([1.0, 2.0, 3.0])
        self.data['npfloat_2d'] = np.array([[1.0, 2.0], [3.0, 4.0]])
        self.data['npbool'] = np.array([True, False, True])
        self.data['npstr'] = np.array(['Uno', 'Dos', 'Tres'])
        self.data['npint'] = np.array([1, 2, 3])
        self.data['alist'] = [1, 2, 3, 4]
        self.data['atuple'] = (1, 2, 3, 4)
        self.location = 'MyName.Is.myParam'
        self.dynamic_imports = []
        self.make_params()
        # Add exploration:
        self.explore()

    def test_get_item(self):
        """Indexing an explored parameter returns the range entry."""
        for paramname in self.explore_dict:
            param = self.param[paramname]
            val1 = param.f_get_range()[1]
            val2 = param[1]
            self.assertTrue(comp.nested_equal(val1, val2), '%s != %s' % (str(val1), str(val2)))

    def test_get_data(self):
        """``data``, ``default`` and index ``-1`` access agree with ``f_get``."""
        for paramname in self.param:
            param = self.param[paramname]
            val1 = param.data
            val2 = param.f_get()
            self.assertTrue(comp.nested_equal(val1, val2), '%s != %s' % (str(val1), str(val2)))
            val3 = param['data']
            self.assertTrue(comp.nested_equal(val3, val2), '%s != %s' % (str(val3), str(val2)))
            val1 = param.default
            val2 = param._default
            self.assertTrue(comp.nested_equal(val1, val2), '%s != %s' % (str(val1), str(val2)))
            val3 = param['default']
            self.assertTrue(comp.nested_equal(val3, val2), '%s != %s' % (str(val3), str(val2)))
            val4 = param.f_get_default()
            self.assertTrue(comp.nested_equal(val4, val2), '%s != %s' % (str(val4), str(val2)))
            val5 = param[-1]
            self.assertTrue(comp.nested_equal(val5, val2), '%s != %s' % (str(val5), str(val2)))

    def test_type_error_for_get_item(self):
        """Indexing a parameter without a range must raise a TypeError."""
        for name, param in self.param.items():
            if not name in self.explore_dict:
                with self.assertRaises(TypeError):
                    param[1]

    def test_type_error_for_shrink(self):
        """Shrinking a parameter without a range must raise a TypeError."""
        for name, param in self.param.items():
            if not name in self.explore_dict:
                with self.assertRaises(TypeError):
                    param._shrink()

    def explore(self):
        """Explore a subset of parameters; also checks exploring without data fails."""
        self.explore_dict = cartesian_product(
            {'npstr': [np.array(['Uno', 'Dos', 'Tres']),
                       np.array(['Cinco', 'Seis', 'Siette']),
                       np.array(['Ocho', 'Nueve', 'Diez'])],
             'val0': [1, 2, 3],
             'alist': [[1, 2, 3, 4], [3, 4, 5, 6]]})
        ## Explore the parameter:
        for key, vallist in self.explore_dict.items():
            # Exploring a parameter whose data was removed must fail.
            old_data = self.param[key]._data
            self.param[key]._data = None
            with self.assertRaises(TypeError):
                self.param[key]._explore(vallist)
            self.param[key]._data = old_data
            self.param[key]._explore(vallist)

    def test_the_insertion_made_implicetly_in_setUp(self):
        """Non-explored parameters still carry the original setUp data."""
        for key, val in self.data.items():
            if not key in self.explore_dict:
                self.param[key]._restore_default()
                param_val = self.param[key].f_get()
                self.assertTrue(np.all(repr(val) == repr(param_val)),
                                '%s != %s' % (str(val), str(param_val)))

    def test_expanding_type_error(self):
        """Expanding a never-explored parameter fails, locked or not."""
        for name, param in self.param.items():
            if not name in self.explore_dict:
                # _expand must fail because the parameter has no range:
                with self.assertRaises(TypeError):
                    param._expand([1, 2, 3])
                # ... and also when the parameter is explicitly unlocked:
                with self.assertRaises(TypeError):
                    param.f_unlock()
                    param._expand([1, 2, 3])

    def test_rename(self):
        """Renaming updates name, full name and location consistently."""
        for name, param in self.param.items():
            param._rename('test.test.wirsing')
            self.assertTrue(param.v_name == 'wirsing')
            self.assertTrue(param.v_full_name == 'test.test.wirsing')
            self.assertTrue(param.v_location == 'test.test')

    def test_expanding(self):
        """Expanding with the original range doubles the range length."""
        for name, param in self.param.items():
            if name in self.explore_dict:
                param.f_unlock()
                param._expand(self.explore_dict[name])
                self.assertTrue(len(param) == 2 * len(self.explore_dict[name]),
                                'Expanding of %s did not work.' % name)

    def test_exploration(self):
        """Every range entry is reachable via ``_set_parameter_access``."""
        for key, vallist in self.explore_dict.items():
            param = self.param[key]
            for idx, val in enumerate(vallist):
                assert isinstance(param, BaseParameter)
                param._set_parameter_access(idx)
                # BUGFIX: the failure messages below used to sit *outside*
                # the assertTrue call as a no-op trailing tuple element.
                self.assertTrue(np.all(repr(param.f_get()) == repr(val)),
                                '%s != %s' % (str(param.f_get()), str(val)))
                param_val = self.param[key].f_get_range()[idx]
                self.assertTrue(np.all(str(val) == str(param_val)),
                                '%s != %s' % (str(val), str(param_val)))
            param._restore_default()
            self.assertTrue(param.v_explored and param.f_has_range(), 'Error for %s' % key)
            val = self.data[key]
            self.assertTrue(np.all(repr(param.f_get()) == repr(val)),
                            '%s != %s' % (str(param.f_get()), str(val)))

    def test_storage_and_loading(self):
        """Parameters survive a _store/_load cycle into a fresh instance."""
        for key, param in self.param.items():
            store_dict = param._store()
            # Due to smart storing the storage dict should be small and only
            # contain 5 items or less: 1 for data, 1 for reference, and 3 for
            # the array/matrices/items.
            if param.f_has_range():
                if isinstance(param, (ArrayParameter, PickleParameter)) and \
                        not isinstance(param, SparseParameter):
                    self.assertTrue(len(store_dict) < 7)
                # For sparse parameters it is more:
                if isinstance(param, SparseParameter):
                    self.assertTrue(len(store_dict) < 23)
            constructor = param.__class__
            param.f_unlock()
            param.f_empty()
            param = constructor('', 42)
            param._load(store_dict)
            param._rename(self.location + '.' + key)
            self.param[key] = param
        # The reloaded parameters must pass the same checks as the originals.
        self.test_the_insertion_made_implicetly_in_setUp()
        self.test_exploration()
        self.test_meta_settings()

    def test_pickling_without_multiprocessing(self):
        """Full-copy pickling preserves data, exploration and meta data."""
        for key, param in self.param.items():
            param.f_unlock()
            param.v_full_copy = True
            dump = pickle.dumps(param)
            newParam = pickle.loads(dump)
            self.param[key] = newParam
        self.test_exploration()
        self.test_the_insertion_made_implicetly_in_setUp()
        self.test_meta_settings()

    def test_pickling_with_mocking_multiprocessing(self):
        """Without full copy the pickled clone loses its exploration range."""
        self.test_exploration()
        for key, param in self.param.items():
            param.f_unlock()
            param.v_full_copy = False
            dump = pickle.dumps(param)
            newParam = pickle.loads(dump)
            self.param[key] = newParam
            if key in self.explore_dict:
                self.assertTrue(not newParam.f_has_range() and newParam.v_explored)
        self.test_the_insertion_made_implicetly_in_setUp()
        self.test_meta_settings()

    def test_resizing_and_deletion(self):
        """Shrinking reduces to a single value; emptying clears everything."""
        for key, param in self.param.items():
            param.f_lock()
            with self.assertRaises(pex.ParameterLockedException):
                param.f_set(42)
            with self.assertRaises(pex.ParameterLockedException):
                param._shrink()
            param.f_unlock()
            if len(param) > 1:
                self.assertTrue(param.f_has_range())
            if param.f_has_range():
                self.assertTrue(len(param) > 1)
                param._shrink()
            self.assertTrue(len(param) == 1)
            self.assertFalse(param.f_is_empty())
            self.assertFalse(param.f_has_range())
            param.f_empty()
            self.assertTrue(param.f_is_empty())
            self.assertFalse(param.f_has_range())
class ArrayParameterTest(ParameterTest):
    """Runs the ``ParameterTest`` battery against ``ArrayParameter``."""

    tags = 'unittest', 'parameter', 'array'

    def setUp(self):
        """Extend the shared test data with tuple/list payloads first."""
        if not hasattr(self, 'data'):
            self.data = {}
        self.data['myemptytuple'] = ()
        self.data['myemptylist'] = []
        self.data['myinttuple'] = (1, 2, 3)
        self.data['mydoubletuple'] = (42.0, 43.7, 33.3)
        self.data['mystringtuple'] = ('Eins', 'zwei', 'dr3i')
        super(ArrayParameterTest, self).setUp()

    def make_params(self):
        """Build one ``ArrayParameter`` per data entry."""
        self.param = {}
        for key, val in self.data.items():
            self.param[key] = ArrayParameter(self.location + '.' + key, val, comment=key)

    def explore(self):
        """Explore a zipped subset of the parameters."""
        # BUGFIX: removed an unused ``matrices = []`` local, apparently a
        # leftover from a copy of ``PickleParameterTest.explore``.
        self.explore_dict = cartesian_product(
            {'npstr': [np.array(['Uno', 'Dos', 'Tres']),
                       np.array(['Cinco', 'Seis', 'Siette']),
                       np.array(['Ocho', 'Nueve', 'Diez'])],
             'val0': [1, 2, 3],
             'myinttuple': [(1, 2, 1), (4, 5, 6), (5, 6, 7)],
             'alist': [[1, 2, 3, 4], ['wooot']]},
            (('npstr', 'val0'), 'myinttuple', 'alist'))
        ## Explore the parameter:
        for key, vallist in self.explore_dict.items():
            self.param[key]._explore(vallist)

    def test_store_load_with_hdf5(self):
        # Pure delegation, kept so the test is reported under this class.
        return super(ArrayParameterTest, self).test_store_load_with_hdf5()
class PickleParameterTest(ParameterTest):
    """Runs the ``ParameterTest`` battery against ``PickleParameter``."""

    tags = 'unittest', 'parameter', 'pickle'

    def setUp(self):
        """Add sparse-matrix payloads that only a pickling parameter can hold."""
        if not hasattr(self, 'data'):
            self.data = {}
        csc_builder = spsp.lil_matrix((1000, 100))
        csc_builder[1, 2] = 44.5
        self.data['spsparse_csc'] = csc_builder.tocsc()
        csr_builder = spsp.lil_matrix((2222, 22))
        csr_builder[1, 3] = 44.5
        self.data['spsparse_csr'] = csr_builder.tocsr()
        lil_builder = spsp.lil_matrix((111, 111))
        lil_builder[3, 2] = 44.5
        self.data['spsparse_lil'] = lil_builder
        super(PickleParameterTest, self).setUp()

    def make_params(self):
        """Build ``PickleParameter`` instances, cycling pickle protocols 0-2."""
        self.param = {}
        self.protocols = {}
        for index, (key, val) in enumerate(self.data.items()):
            proto = index % 3
            self.param[key] = PickleParameter(self.location + '.' + key, val,
                                              comment=key, protocol=proto)
            self.protocols[key] = proto

    def test_meta_settings(self):
        """Additionally verify that each parameter kept its pickle protocol."""
        for key, param in self.param.items():
            self.assertEqual(param.v_full_name, self.location + '.' + key)
            self.assertEqual(param.v_name, key)
            self.assertEqual(param.v_location, self.location)
            self.assertEqual(param.v_protocol, self.protocols[key], '%d != %d' %
                             (param.v_protocol, self.protocols[key]))

    def explore(self):
        """Explore including a list of growing lil matrices."""
        lil_variants = []
        for irun in range(3):
            variant = spsp.lil_matrix((111, 111))
            variant[3, 2] = 44.5 * irun
            lil_variants.append(variant)
        self.explore_dict = cartesian_product(
            {'npstr': [np.array(['Uno', 'Dos', 'Tres']),
                       np.array(['Cinco', 'Seis', 'Siette']),
                       np.array(['Ocho', 'Nueve', 'Diez'])],
             'val0': [1, 2, 3],
             'spsparse_lil': lil_variants},
            (('npstr', 'val0'), 'spsparse_lil'))
        ## Explore the parameter:
        for key, vallist in self.explore_dict.items():
            self.param[key]._explore(vallist)
class SparseParameterTest(ParameterTest):
    """Runs the ``ParameterTest`` battery against ``SparseParameter``."""

    tags = 'unittest', 'parameter', 'sparse'

    def setUp(self):
        """Add one payload per supported sparse matrix format."""
        if not hasattr(self, 'data'):
            self.data = {}
        csc_builder = spsp.lil_matrix((1000, 100))
        csc_builder[1, 2] = 44.5
        self.data['spsparse_csc'] = csc_builder.tocsc()
        csr_builder = spsp.lil_matrix((2222, 22))
        csr_builder[1, 3] = 44.5
        self.data['spsparse_csr'] = csr_builder.tocsr()
        bsr_builder = spsp.lil_matrix((111, 111))
        bsr_builder[3, 2] = 44.5
        self.data['spsparse_bsr'] = bsr_builder.tocsr().tobsr()
        dia_builder = spsp.lil_matrix((111, 111))
        dia_builder[3, 2] = 44.5
        self.data['spsparse_dia'] = dia_builder.tocsr().todia()
        super(SparseParameterTest, self).setUp()

    def make_params(self):
        """Build one ``SparseParameter`` per data entry."""
        self.param = {}
        for key, val in self.data.items():
            self.param[key] = SparseParameter(self.location + '.' + key, val, comment=key)

    @staticmethod
    def _matrix_series(make_matrix, convert=None):
        """Three 111x111 matrices with one growing entry each.

        ``make_matrix`` is the sparse constructor; ``convert`` optionally
        transforms each matrix into another sparse format.
        """
        series = []
        for irun in range(3):
            mat = make_matrix((111, 111))
            mat[3, 2 + irun] = 44.5 * irun
            series.append(convert(mat) if convert is not None else mat)
        return series

    def explore(self):
        """Explore all four sparse formats zipped together."""
        matrices_csr = self._matrix_series(spsp.csr_matrix)
        matrices_csc = self._matrix_series(spsp.csc_matrix)
        matrices_bsr = self._matrix_series(spsp.csr_matrix, lambda m: m.tobsr())
        matrices_dia = self._matrix_series(spsp.csr_matrix, lambda m: m.todia())
        self.explore_dict = cartesian_product(
            {'npstr': [np.array(['Uno', 'Dos', 'Tres']),
                       np.array(['Cinco', 'Seis', 'Siette']),
                       np.array(['Ocho', 'Nueve', 'Diez'])],
             'val0': [1, 2, 3],
             'spsparse_csr': matrices_csr,
             'spsparse_csc': matrices_csc,
             'spsparse_bsr': matrices_bsr,
             'spsparse_dia': matrices_dia},
            (('npstr', 'val0'), ('spsparse_csr', 'spsparse_csc',
                                 'spsparse_bsr', 'spsparse_dia')))
        ## Explore the parameter:
        for key, vallist in self.explore_dict.items():
            self.param[key]._explore(vallist)
class ResultTest(TrajectoryComparator):
    """Test battery for the basic ``Result`` class.

    Subclasses reuse the battery for other result types by overriding
    ``make_constructor``/``make_results`` and pre-seeding ``self.data``.
    """

    tags = 'unittest', 'result'

    def make_results(self):
        """Create results filled via constructor, f_set(*args/**kwargs) and __setitem__."""
        self.results = {}
        self.results['test.res.on_constructor'] = self.Constructor('test.res.on_constructor',
                                                                   **self.data)
        self.results['test.res.args'] = self.Constructor('test.res.args')
        self.results['test.res.kwargs'] = self.Constructor('test.res.kwargs')
        self.results['test.res.setitem'] = self.Constructor('test.res.setitem')
        self.results['test.res.args'].f_set(*compat.listvalues(self.data))
        self.results['test.res.kwargs'].f_set(**self.data)
        for key, value in self.data.items():
            self.results['test.res.setitem'][key] = value

    def test_set_item_via_number(self):
        """Integer indices map to ``<name>`` / ``<name>_<idx>`` attributes."""
        res = self.results['test.res.args']
        res[0] = 'Hi'
        res[777] = 777
        self.assertTrue(getattr(res, res.v_name) == 'Hi')
        self.assertTrue(res.f_get(0) == 'Hi')
        self.assertTrue(getattr(res, res.v_name + '_777') == 777)
        self.assertTrue(res[777] == 777)
        self.assertTrue(res.f_get(777) == 777)
        self.assertTrue(0 in res)
        self.assertTrue(777 in res)
        self.assertTrue(99999999 not in res)
        del res[0]
        self.assertTrue(0 not in res)
        del res[777]
        self.assertTrue(777 not in res)

    def test_iter(self):
        """Iterating a result yields exactly the keys of its data dict."""
        for res in self.results.values():
            keyset1 = set([x for x in res])
            keyset2 = set(res.f_to_dict().keys())
            self.assertTrue(keyset1 == keyset2)

    def make_constructor(self):
        """Select the result type under test."""
        self.Constructor = Result
        self.dynamic_imports = [Result]

    def test_warning(self):
        """Setting attributes on an already stored result should only warn."""
        for res in self.results.values():
            with warnings.catch_warnings(record=True) as w:
                res._stored = True
                res.XxxXXXXxxxx = 14

    def test_f_get_many_items(self):
        """``f_get`` with several names returns the values as a list."""
        for res in self.results.values():
            if 'integer' in res and 'float' in res:
                myreslist = res.f_get('integer', 'float')
                self.assertEqual([self.data['integer'], self.data['float']], myreslist)

    def test_getattr_and_setattr(self):
        """New attributes are stored; unknown ones raise AttributeError."""
        for res in self.results.values():
            res.iamanewvar = 42
            # BUGFIX: was ``assertTrue(res.iamanewvar, 42)`` which used 42
            # as the failure *message* and never compared the values.
            self.assertEqual(res.iamanewvar, 42)
            with self.assertRaises(AttributeError):
                res.iamanonexistingvar

    def test_deletion_throws_error_if_item_not_there(self):
        for res in self.results.values():
            with self.assertRaises(AttributeError):
                del res.idonotexistforsure

    def test_get_data(self):
        """``data`` access works only for results holding a single item."""
        for res in self.results.values():
            if len(res._data) == 1:
                val1 = res.data
                val2 = res.f_get()
                self.assertTrue(comp.nested_equal(val1, val2), '%s != %s' % (str(val1), str(val2)))
                val3 = res['data']
                self.assertTrue(comp.nested_equal(val3, val2), '%s != %s' % (str(val3), str(val2)))
            else:
                with self.assertRaises(AttributeError):
                    res.data
                with self.assertRaises(AttributeError):
                    res['data']
                with self.assertRaises(ValueError):
                    res.f_get()

    def test_f_get_errors(self):
        """``f_get()`` without arguments fails for empty or ambiguous results."""
        # NOTE(review): deliberately uses the plain ``Result`` class and not
        # ``self.Constructor`` -- confirm whether subclasses should cover
        # their own type here as well.
        res = Result('test')
        with self.assertRaises(AttributeError):
            res.f_get()
        res.f_set(1, 2, 42)
        with self.assertRaises(ValueError):
            res.f_get()

    def test_contains(self):
        for res in [self.results['test.res.kwargs']] if 'test.res.kwargs' in self.results else []:
            self.assertTrue('integer' in res)
            self.assertFalse('integoAr' in res)

    def test_deletion(self):
        """Deleting every entry leaves an empty result."""
        for res in self.results.values():
            for key in res.f_to_dict():
                delattr(res, key)
            self.assertTrue(res.f_is_empty())

    def test_set_numbering(self):
        """Positional f_set round-trips through positional f_get."""
        int_list = list(range(10))
        for res in self.results.values():
            res.f_set(*int_list)
            self.assertEqual(res.f_get(*int_list), int_list)

    def test_dir(self):
        """``dir`` lists the stored entry names."""
        res = self.results['test.res.on_constructor']
        dirlist = dir(res)
        self.assertTrue('integer' in dirlist)
        self.assertTrue('dict' in dirlist)
        self.assertTrue('float' in dirlist)
        self.assertTrue('tuple' in dirlist)
        res = self.results['test.res.args']
        dirlist = dir(res)
        self.assertTrue(res.v_name in dirlist)
        self.assertTrue('%s_1' % res.v_name in dirlist)

    def setUp(self):
        """Populate the test data and build the results."""
        if not hasattr(self, 'data'):
            self.data = {}
        self.data['integer'] = 42
        self.data['float'] = 42.424242
        self.data['string'] = 'TestString! 66'
        self.data['long'] = compat.long_type(444444444444444444)
        self.data['numpy_array'] = np.array([[3232.3, 323232323232.32323232], [4., 4.]])
        self.data['tuple'] = (444, 444, 443)
        self.data['list'] = ['3', '4', '666']
        self.data['dict'] = {'a': 'b', 'c': 42, 'd': (1, 2, 3)}
        self.data['object_table'] = ObjectTable(data={'characters': ['Luke', 'Han', 'Spock'],
                                                      'Random_Values': [42, 43, 44],
                                                      'Arrays': [np.array([1, 2]), np.array([3, 4]), np.array([5, 5])]})
        self.data['pandas_frame'] = pd.DataFrame(data={'characters': ['Luke', 'Han', 'Spock'],
                                                       'Random_Values': [42, 43, 44],
                                                       'Doubles': [1.2, 3.4, 5.6]})
        self.data['nested_data.hui.buh.integer'] = 42
        myframe = pd.DataFrame(data={'TC1': [1, 2, 3], 'TC2': ['Waaa', np.nan, ''], 'TC3': [1.2, 42.2, np.nan]})
        myseries = myframe['TC1']
        # NOTE: ``pd.Panel`` only exists in pandas < 0.25.
        mypanel = pd.Panel({'Item1': pd.DataFrame(np.random.randn(4, 3)),
                            'Item2': pd.DataFrame(np.random.randn(4, 2))})
        self.data['series'] = myseries
        self.data['panel'] = mypanel
        self.make_constructor()
        self.make_results()

    def test_store_load_with_hdf5(self):
        """Round-trip all results through an HDF5 trajectory file."""
        traj_name = 'test_%s' % self.__class__.__name__
        filename = make_temp_dir(traj_name + '.hdf5')
        traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                          filename=filename, overwrite_file=True)
        for res in self.results.values():
            traj.f_add_result(res)
        traj.f_store()
        new_traj = Trajectory(name=traj_name, dynamic_imports=self.dynamic_imports,
                              filename=filename)
        new_traj.f_load(load_data=2)
        self.compare_trajectories(traj, new_traj)

    def test_rename(self):
        """Renaming updates name, full name and location consistently."""
        for name, res in self.results.items():
            res._rename('test.test.wirsing')
            self.assertTrue(res.v_name == 'wirsing')
            self.assertTrue(res.v_full_name == 'test.test.wirsing')
            self.assertTrue(res.v_location == 'test.test')

    def test_emptying(self):
        for res in self.results.values():
            self.assertFalse(res.f_is_empty())
            res.f_empty()
            self.assertTrue(res.f_is_empty())

    def test_string_representation(self):
        """``f_val_to_str`` matches a manual, length-capped 'key=value' join."""
        self.results['kkk'] = self.Constructor('test.res.kkk', answer=42)
        self.results['rrr'] = self.Constructor('test.res.rrr')
        for res in self.results.values():
            resstrlist = []
            strlen = 0
            for key in res._data:
                val = res._data[key]
                resstr = '%s=%s, ' % (key, repr(val))
                resstrlist.append(resstr)
                strlen += len(resstr)
                if strlen > pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH:
                    break
            return_string = "".join(resstrlist)
            if len(return_string) > pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH:
                return_string = \
                    return_string[0:pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH - 3] + '...'
            else:
                return_string = return_string[0:-2]  # Delete the last `, `
            valstr = res.f_val_to_str()
            self.assertTrue(return_string == valstr)
            self.assertTrue(len(valstr) <= pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH)

    def test_meta_settings(self):
        """Full name, short name and location are consistent for every result."""
        for key, res in self.results.items():
            self.assertEqual(res.v_full_name, key)
            self.assertEqual(res.v_name, key.split('.')[-1])
            self.assertEqual(res.v_location, '.'.join(key.split('.')[0:-1]))

    def test_natural_naming(self):
        for res_name, res in self.results.items():
            for key, val1 in res.f_to_dict().items():
                val2 = getattr(res, key)
                self.assertTrue(comp.nested_equal(val1, val2))

    def test_get_item(self):
        for res_name, res in self.results.items():
            for key, val1 in res.f_to_dict().items():
                val2 = res[key]
                self.assertTrue(comp.nested_equal(val1, val2))

    def test_f_to_dict_no_copy(self):
        for res_name, res in self.results.items():
            for key, val1 in res.f_to_dict(copy=False).items():
                val2 = res[key]
                self.assertTrue(comp.nested_equal(val1, val2))

    def test_Attribute_error_for_get_item(self):
        for res in self.results.values():
            with self.assertRaises(AttributeError):
                res['IDONOTEXIST']

    def test_reject_outer_data_structure(self):
        """Arbitrary foreign objects must be rejected by ``f_set``."""
        for res in self.results.values():
            with self.assertRaises(TypeError):
                res.f_set(doesntwork=ChainMap({}, {}))

    def test_the_insertion_made_implicetly_in_setUp(self):
        """The kwargs-built result still carries the original setUp data."""
        for key, val1 in self.data.items():
            res = self.results['test.res.kwargs']
            val2 = res[key]
            if isinstance(val1, dict):
                # Plain dicts are stored entry-wise, so compare per key.
                for innerkey in val1:
                    innerval1 = val1[innerkey]
                    innerval2 = val2[innerkey]
                    self.assertEqual(repr(innerval1), repr(innerval2),
                                     '%s != %s' % (str(val1), str(val2)))
            else:
                self.assertEqual(repr(val1), repr(val2), '%s != %s' % (str(val1), str(val2)))

    def test_pickling(self):
        """Results survive a pickle round trip."""
        for key, res in self.results.items():
            dump = pickle.dumps(res)
            newRes = pickle.loads(dump)
            self.results[key] = newRes
        self.test_the_insertion_made_implicetly_in_setUp()
        self.test_meta_settings()

    def test_storage_and_loading(self):
        """Results survive a _store/_load cycle into a fresh instance."""
        for key, res in self.results.items():
            store_dict = res._store()
            constructor = res.__class__
            res = constructor('')
            res._load(store_dict)
            res._rename(key)
            self.results[key] = res
        self.test_the_insertion_made_implicetly_in_setUp()
        self.test_meta_settings()
class PickleResultTest(ResultTest):
    """Runs the ``ResultTest`` battery against ``PickleResult``."""

    tags = 'unittest', 'result', 'pickle'

    def make_constructor(self):
        self.Constructor = PickleResult
        self.dynamic_imports = [PickleResult]

    def test_reject_outer_data_structure(self):
        # PickleResult pickles everything, so arbitrary objects are accepted.
        pass

    def test_meta_settings(self):
        """Additionally verify that each result kept its pickle protocol."""
        for key, res in self.results.items():
            name_parts = key.split('.')
            self.assertEqual(res.v_full_name, key)
            self.assertEqual(res.v_name, name_parts[-1])
            self.assertEqual(res.v_location, '.'.join(name_parts[0:-1]))
            self.assertEqual(res.v_protocol, self.protocols[key])

    def make_results(self):
        """Build the three standard results with distinct pickle protocols."""
        self.protocols = {'test.res.on_constructor': 0,
                          'test.res.args': 1,
                          'test.res.kwargs': 2}
        self.results = {}
        self.results['test.res.on_constructor'] = self.Constructor(
            'test.res.on_constructor', protocol=0, **self.data)
        self.results['test.res.args'] = self.Constructor('test.res.args', protocol=1)
        self.results['test.res.kwargs'] = self.Constructor('test.res.kwargs', protocol=2)
        self.results['test.res.args'].f_set(*compat.listvalues(self.data))
        self.results['test.res.kwargs'].f_set(**self.data)
class SparseResultTest(ResultTest):
    """Runs the ``ResultTest`` battery against ``SparseResult``."""

    tags = 'unittest', 'result', 'sparse'

    def make_constructor(self):
        self.Constructor = SparseResult
        self.dynamic_imports = [SparseResult]

    def setUp(self):
        """Add one payload per supported sparse matrix format."""
        if not hasattr(self, 'data'):
            self.data = {}
        csc_mat = spsp.csc_matrix((1000, 100))
        csc_mat[1, 2] = 44.5
        self.data['spsparse_csc'] = csc_mat
        csr_mat = spsp.csr_matrix((2222, 22))
        csr_mat[1, 3] = 44.5
        self.data['spsparse_csr'] = csr_mat
        bsr_source = spsp.csr_matrix((111, 111))
        bsr_source[3, 2] = 44.5
        self.data['spsparse_bsr'] = bsr_source.tobsr()
        dia_source = spsp.csr_matrix((111, 111))
        dia_source[3, 2] = 44.5
        self.data['spsparse_dia'] = dia_source.todia()
        super(SparseResultTest, self).setUp()

    def test_illegal_naming(self):
        """Keys containing the sparse identifier suffix must be rejected."""
        for res in self.results.values():
            forbidden = {'val' + SparseResult.IDENTIFIER: 42}
            with self.assertRaises(AttributeError):
                res.f_set(**forbidden)

    def make_results(self):
        """Build the three standard results, exercising ``v_protocol`` too."""
        self.results = {}
        self.results['test.res.on_constructor'] = self.Constructor(
            'test.res.on_constructor', protocol=0, **self.data)
        self.results['test.res.args'] = self.Constructor('test.res.args')
        self.results['test.res.args'].v_protocol = 1
        self.results['test.res.kwargs'] = self.Constructor('test.res.kwargs', protocol=2)
        self.results['test.res.args'].f_set(*compat.listvalues(self.data))
        self.results['test.res.kwargs'].f_set(**self.data)
if __name__ == '__main__':
opt_args = parse_args()
run_suite(**opt_args) | {
"content_hash": "ef34a44243a65c15837486d3c8d4d2c6",
"timestamp": "",
"source": "github",
"line_count": 1069,
"max_line_length": 114,
"avg_line_length": 33.31057062675398,
"alnum_prop": 0.5536802493751579,
"repo_name": "nigroup/pypet",
"id": "d300a8afeb31a497ee256acfc137c0f341efecb7",
"size": "35609",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pypet/tests/unittests/parameter_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2816"
},
{
"name": "PowerShell",
"bytes": "3149"
},
{
"name": "Python",
"bytes": "1590396"
},
{
"name": "Shell",
"bytes": "3514"
}
],
"symlink_target": ""
} |
"""Packaging script for the ``twitter_mysql`` distribution."""
import io

from setuptools import setup

# BUGFIX: read the pinned requirements directly instead of going through
# ``pip.req.parse_requirements`` -- pip's internals are not a stable API
# and ``parse_requirements`` was removed from that location in pip >= 10.
with io.open('requirements.txt', encoding='utf-8') as req_file:
    reqs = [line.strip() for line in req_file
            if line.strip() and not line.strip().startswith('#')]

setup(
    name='twitter_mysql',
    version='0.1',
    description='Scripts for normalizing a twitter dataset into mysql',
    author='John Robinson',
    author_email='soco@uw.edu',
    url='https://www.geosoco.com/projects/twitter-mysql/',
    packages=['twitter_mysql'],
    install_requires=reqs,
)
)
| {
"content_hash": "eb2fd39ce530b0c589058e18528c0a92",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 28.63157894736842,
"alnum_prop": 0.6893382352941176,
"repo_name": "emCOMP/twitter-mysql",
"id": "f3ef3c2d2858b5beba86f850d414b68fd4e7092a",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "84184"
}
],
"symlink_target": ""
} |
from django import template
from django.core.cache import cache
from django.template.defaultfilters import stringfilter
from django.template.loader import render_to_string
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
register = template.Library()
def __get_cached_authors_key(object):
    """Build the cache key under which *object*'s authors are stored.

    BUGFIX: the original return statement ended with a trailing comma, so
    the function returned a one-element tuple instead of the key string.
    """
    return u'magazine_authors_{0}_{1}'.format(object.__class__.__name__,
                                              object.pk)
def __get_cached_authors(object):
    """Return ``object.all_authors()``, memoised in the cache for one hour."""
    cache_key = __get_cached_authors_key(object)
    cached = cache.get(cache_key)
    if cached:
        return cached
    # Cache miss (or falsy cached value): recompute and store if non-empty.
    fresh = object.all_authors()
    if fresh:
        cache.set(cache_key, fresh, 3600)
    return fresh
@register.simple_tag
def magazine_authors(object):
    """Render the author list as 'A', 'A and B', or 'A, B and C' markup."""

    def _render(author):
        return render_to_string('magazine/_individual_author.html',
                                {'author': author})

    authors = __get_cached_authors(object)
    if not authors:
        return ''
    rendered = [_render(author) for author in list(authors)]
    if len(rendered) == 1:
        return rendered[0]
    # Join all but the last rendered author with commas, then append the
    # final author with ' and '.
    joined = ', '.join(rendered[:-1]) + ' and ' + rendered[-1]
    return mark_safe(joined)
@register.filter
@stringfilter
def ampersands(value, autoescape=None):
    """Filter that wraps ' and ' / ' & ' in a styled ampersand <span>."""
    # Escape the incoming value first (when autoescaping is active) so the
    # span markup injected below is the only unescaped HTML in the result.
    if autoescape:
        esc = conditional_escape
    else:
        esc = lambda x: x
    value = esc(value)
    pretty_ampersand = u' <span class="ampersand">&</span> '
    value = value.replace(' and ', pretty_ampersand)
    value = value.replace(' & ', pretty_ampersand)
    # NOTE(review): this second ' & ' replace is a no-op — the identical
    # replacement already ran unconditionally above. It may have been meant
    # to target an escaped form instead; confirm intent with the author.
    if not autoescape:
        value = value.replace(' & ', pretty_ampersand)
    return mark_safe(value)
ampersands.needs_autoescape = True
| {
"content_hash": "b63c75d23d29c4d0e1f163dd23512c7c",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 28.257142857142856,
"alnum_prop": 0.6122345803842265,
"repo_name": "dominicrodger/django-magazine",
"id": "91a7f6a92e286eda0730f6ee5ff8b7aabe6b626c",
"size": "1978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magazine/templatetags/magazine_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "122684"
},
{
"name": "Shell",
"bytes": "5114"
}
],
"symlink_target": ""
} |
import io
import unittest
from unittest.mock import patch
from kattis import k_parsinghex
###############################################################################
class SampleInput(unittest.TestCase):
    '''Verify the solver against the problem statement samples.'''
    def test_sample_input(self):
        '''Feed the sample stdin to main() and compare captured stdout.'''
        sample_in = '\n'.join([
            'uyzrr0x5206aBCtrrwm0Xa8aD4poqwqr',
            'pqovx0x6d3e6-+ 230xB6fcgmmm',
        ]) + '\n'
        expected = '\n'.join([
            '0x5206aBC 86010556',
            '0Xa8aD4 690900',
            '0x6d3e6 447462',
            '0xB6fc 46844',
        ]) + '\n'
        with patch('sys.stdin', io.StringIO(sample_in)) as stdin,\
                patch('sys.stdout', new_callable=io.StringIO) as stdout:
            k_parsinghex.main()
            self.assertEqual(stdout.getvalue(), expected)
            # All of stdin must have been consumed by the solver.
            self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
    # Allow running this test module directly: `python test_k_parsinghex.py`.
    unittest.main()
| {
"content_hash": "cb022d78a5dd04bb627e01a0d1801f6c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 34.55882352941177,
"alnum_prop": 0.5387234042553192,
"repo_name": "ivanlyon/exercises",
"id": "00a815992f5c6bd303e567b8f101983e45827d56",
"size": "1175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_k_parsinghex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1283"
},
{
"name": "HTML",
"bytes": "9068"
},
{
"name": "Python",
"bytes": "96419"
}
],
"symlink_target": ""
} |
"""
Services for communicating with Hacker News REST API
and Hacker News Interval REST API
"""
import json
import requests
from models import JSONSerializable
class HTTPService(object):
    """
    Abstract class service that implements common methods for
    all HTTP services

    NOTE: this module uses Python 2 syntax (print statements and the
    `except Exc, e` form) and will not run under Python 3 as-is.
    """
    # Base url for the service
    # example: http://example.com/api/
    BASE_URL = ""
    # Format suffix (.json) - optional
    FORMAT = ""
    def build_url(self, resource, resource_id=""):
        """
        Create url in the following format
        BASE_URL/resource/resource_id.FORMAT

        When resource_id is empty, the "/" separator is omitted and FORMAT
        is appended directly to the resource path.
        """
        url = self.BASE_URL + resource
        if resource_id != "":
            url = url + "/"
        url = url + resource_id + self.FORMAT
        return url
    def exec_request(self, request, resource, resource_id="", data=None):
        """
        Execute HTTP request

        request: one of "get", "post", "put", "patch" (anything else raises
        ValueError). Returns the decoded JSON body on a 200/201 response,
        or None on any HTTP error or requests-level failure.
        """
        url = self.build_url(resource, resource_id)
        headers = {"content-type": "application/json"}
        # Limit actions we're allowing
        allowed_actions = {"get": requests.get, "post": requests.post,
                           "put": requests.put, "patch": requests.patch}
        if request not in allowed_actions:
            raise ValueError("Action %s is not allowed " % request)
        # Execute HTTP request provided by request
        try:
            # NOTE(review): `if not data` treats falsy payloads ("" / {} / 0)
            # the same as None, sending a body-less request — confirm intended.
            if not data:
                response = allowed_actions[request](url)
            else:
                response = allowed_actions[request](url=url, data=data, headers=headers)
            if response.status_code != requests.codes.ok and \
               response.status_code != requests.codes.created:
                print "Action %s failed for url %s with code %s" % \
                    (request, url, response.status_code)
                return None
            # NOTE(review): response.json() raises on a non-JSON body and is
            # not caught by the RequestException handler below — confirm.
            return response.json()
        except requests.exceptions.RequestException, e:
            print "Request exception occured "
            print "Action %s failed for url %s" % (request, url)
            print e
            return None
    def get(self, resource, resource_id=""):
        """Execute HTTP GET request to a given resource"""
        return self.exec_request("get", resource, resource_id)
    def post(self, resource, data):
        """
        Execute HTTP POST request to a given resource
        with given data (serialized via data.to_json())
        """
        data = data.to_json()
        return self.exec_request(request="post", resource=resource, data=data)
    def put(self, resource, resource_id, data):
        """
        Execute HTTP PUT request to a given resource
        with given data (serialized via data.to_json())
        """
        data = data.to_json()
        return self.exec_request("put", resource, resource_id, data)
    def patch(self, resource, resource_id, data):
        """
        Execute HTTP PATCH request to a given resource
        with given data (serialized with the JSONSerializable encoder)
        """
        data = json.dumps(data, default=JSONSerializable.json_encode)
        return self.exec_request("patch", resource, resource_id, data)
class HackerNewsInterval(HTTPService):
    """
    HTTP client for the Hacker News Interval REST API.
    """
    BASE_URL = ""  # URL of the service, injected via the constructor
    STORY_IDS_URL = "story/ids"  # route for getting existing story ids
    STORY_URL = "story"  # route for creating/updating stories
    SNAPSHOTS_URL = "snapshots"  # route for creating snapshots
    SNAPSHOT_URL = "snapshot"  # route for updating snapshots
    def __init__(self, base_url):
        self.BASE_URL = base_url
    def get_story_ids(self):
        """Return every story ID known to HN Interval."""
        return self.get(self.STORY_IDS_URL)
    def add_story(self, story):
        """Create a new story resource, keyed by the story's own id."""
        return self.put(self.STORY_URL, str(story._id), story)
    def update_story(self, story_id, score):
        """Append a new score entry to an existing story via JSON PATCH."""
        score_patch = {"op": "add", "path": "scores", "value": score}
        return self.patch(self.STORY_URL, str(story_id), score_patch)
    def create_snapshot(self, snapshot):
        """Create a new snapshot resource."""
        return self.post(self.SNAPSHOTS_URL, snapshot)
    def update_snapshot(self, snapshot):
        """Replace an existing snapshot with *snapshot*'s data."""
        return self.put(self.SNAPSHOT_URL, str(snapshot._id), snapshot)
class HackerNews(HTTPService):
    """
    HTTP client for the official Hacker News REST API.
    """
    BASE_URL = ""
    # HN API resources are fetched as .json documents.
    # (The original class defined FORMAT twice; the duplicate is removed.)
    FORMAT = ".json"
    TOP_STORY_URL = "topstories"
    TOP_STORY_LIMIT = 100  # number of ids returned from HN API
    ITEM_URL = "item"
    def __init__(self, base_url):
        self.BASE_URL = base_url
    def get_top_stories(self, limit=100):
        """Get up to *limit* top story IDs from HN (None on request failure).

        Out-of-range limits fall back to the default of 100.
        """
        # BUG FIX: the original guard `if 0 > limit > TOP_STORY_LIMIT` is a
        # chained comparison requiring limit < 0 AND limit > 100 at once, so
        # it could never fire; a negative limit then silently truncated the
        # result via response[:limit]. Validate each bound separately.
        if limit < 0 or limit > HackerNews.TOP_STORY_LIMIT:
            limit = 100
        response = self.get(self.TOP_STORY_URL)
        if response is None:
            return None
        return response[:limit]
    def get_story(self, story_id):
        """Get the story for provided ID."""
        return self.get(self.ITEM_URL, str(story_id))
| {
"content_hash": "0da554c8f29a24037fe9792f8ce1dd09",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 88,
"avg_line_length": 31.817073170731707,
"alnum_prop": 0.5962054426983519,
"repo_name": "kperusko/hacker-news-interval-worker",
"id": "188648eb547ab1a7c94a62acfc0847a3047ecd4c",
"size": "5218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11905"
},
{
"name": "Shell",
"bytes": "96"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema migration: creates the BlogPost table with title, tags
    # and creation-date columns. Auto-generated; do not edit the operations.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BlogPost',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=300)),
                ('tags', models.CharField(max_length=200)),
                ('creationdate', models.DateTimeField()),
            ],
        ),
    ]
| {
"content_hash": "1cf4d8239bd85ddbd1a2af051486d9d3",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 118,
"avg_line_length": 28.454545454545453,
"alnum_prop": 0.5383386581469649,
"repo_name": "Rut0/RutoApp",
"id": "7f10500ad454192d77f78dc22f0c2e74612c2276",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "616"
},
{
"name": "HTML",
"bytes": "1602"
},
{
"name": "Python",
"bytes": "9271"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
# Auto-generated single-configuration smoke test: build one ozone model with
# the (Difference, ConstantTrend, Seasonal_WeekOfYear, ARX) component combo.
testmod.build_model( ['Difference'] , ['ConstantTrend'] , ['Seasonal_WeekOfYear'] , ['ARX'] );
"content_hash": "b1dee84bbc145dffd03f2082088f41b9",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 94,
"avg_line_length": 41.75,
"alnum_prop": 0.7245508982035929,
"repo_name": "antoinecarme/pyaf",
"id": "26d9f0be81e4fa02a28868ee5b18d728170c4c0d",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_ConstantTrend_Seasonal_WeekOfYear_ARX.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""The tests the cover command line platform."""
import logging
import pytest
from homeassistant import setup
from homeassistant.components.cover import ATTR_POSITION, ATTR_TILT_POSITION, DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
SERVICE_TOGGLE,
SERVICE_TOGGLE_COVER_TILT,
STATE_CLOSED,
STATE_OFF,
STATE_ON,
STATE_OPEN,
STATE_UNAVAILABLE,
)
from tests.common import assert_setup_component, async_mock_service
_LOGGER = logging.getLogger(__name__)
ENTITY_COVER = "cover.test_template_cover"
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Every scripted cover action in these tests targets "test.automation";
    # the returned list records each invocation for call-count assertions.
    return async_mock_service(hass, "test", "automation")
async def test_template_state_text(hass, calls):
    """Test the state text of a template."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "value_template": "{{ states.cover.test_state.state }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # The template cover should mirror the state of cover.test_state.
    state = hass.states.async_set("cover.test_state", STATE_OPEN)
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_OPEN
    state = hass.states.async_set("cover.test_state", STATE_CLOSED)
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_CLOSED
async def test_template_state_boolean(hass, calls):
    """Test the value_template attribute."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "value_template": "{{ 1 == 1 }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # A truthy value_template renders as an open cover.
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_OPEN
async def test_template_position(hass, calls):
    """Test the position_template attribute."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ states.cover.test.attributes.position }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test",
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    state = hass.states.async_set("cover.test", STATE_CLOSED)
    await hass.async_block_till_done()
    entity = hass.states.get("cover.test")
    attrs = dict()
    attrs["position"] = 42
    hass.states.async_set(entity.entity_id, entity.state, attributes=attrs)
    await hass.async_block_till_done()
    # A non-zero position reports open, regardless of the source state text.
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_position") == 42.0
    assert state.state == STATE_OPEN
    state = hass.states.async_set("cover.test", STATE_OPEN)
    await hass.async_block_till_done()
    entity = hass.states.get("cover.test")
    attrs["position"] = 0.0
    hass.states.async_set(entity.entity_id, entity.state, attributes=attrs)
    await hass.async_block_till_done()
    # Position 0 reports closed even though the source state says open.
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_position") == 0.0
    assert state.state == STATE_CLOSED
async def test_template_tilt(hass, calls):
    """Test the tilt_template attribute."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "value_template": "{{ 1 == 1 }}",
                            "tilt_template": "{{ 42 }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # The static tilt template should surface as current_tilt_position.
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_tilt_position") == 42.0
async def test_template_out_of_bounds(hass, calls):
    """Test template out-of-bounds condition."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ -1 }}",
                            "tilt_template": "{{ 110 }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # Values outside 0..100 are rejected and reported as unknown (None).
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_tilt_position") is None
    assert state.attributes.get("current_position") is None
async def test_template_mutex(hass, calls):
    """Test that only value or position template can be used."""
    # assert_setup_component(0, ...) expects the config to be rejected.
    with assert_setup_component(0, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "value_template": "{{ 1 == 1 }}",
                            "position_template": "{{ 42 }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                            "icon_template": "{% if states.cover.test_state.state %}"
                            "mdi:check"
                            "{% endif %}",
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.async_all() == []
async def test_template_open_or_position(hass, calls):
    """Test that at least one of open_cover or set_position is used."""
    # NOTE(review): expects one platform setup attempt but no entities
    # created — the cover config lacks any action, so it is dropped.
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {"value_template": "{{ 1 == 1 }}"}
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.async_all() == []
async def test_template_open_and_close(hass, calls):
    """Test that if open_cover is specified, close_cover is too."""
    # Config with open_cover but no close_cover must fail validation.
    with assert_setup_component(0, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "value_template": "{{ 1 == 1 }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.async_all() == []
async def test_template_non_numeric(hass, calls):
    """Test that tilt_template values are numeric."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ on }}",
                            "tilt_template": "{% if states.cover.test_state.state %}"
                            "on"
                            "{% else %}"
                            "off"
                            "{% endif %}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # Non-numeric template results are discarded, leaving positions unknown.
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_tilt_position") is None
    assert state.attributes.get("current_position") is None
async def test_open_action(hass, calls):
    """Test the open_cover command."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ 0 }}",
                            "open_cover": {"service": "test.automation"},
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_CLOSED
    # Opening the cover must fire the scripted action exactly once.
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_close_stop_action(hass, calls):
    """Test the close-cover and stop_cover commands."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ 100 }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {"service": "test.automation"},
                            "stop_cover": {"service": "test.automation"},
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_OPEN
    # close then stop: each scripted action fires once, two calls total.
    await hass.services.async_call(
        DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    await hass.services.async_call(
        DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    assert len(calls) == 2
async def test_set_position(hass, calls):
    """Test the set_position command."""
    with assert_setup_component(1, "cover"):
        # The input_number entity acts as the position backing store so the
        # template cover's reported position tracks the service calls below.
        assert await setup.async_setup_component(
            hass,
            "input_number",
            {"input_number": {"test": {"min": "0", "max": "100", "initial": "42"}}},
        )
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ states.input_number.test.state | int }}",
                            "set_cover_position": {
                                "service": "input_number.set_value",
                                "entity_id": "input_number.test",
                                "data_template": {"value": "{{ position }}"},
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    state = hass.states.async_set("input_number.test", 42)
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_OPEN
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_position") == 100.0
    await hass.services.async_call(
        DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_position") == 0.0
    # Toggle flips between the fully-open and fully-closed positions.
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_position") == 100.0
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_position") == 0.0
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: ENTITY_COVER, ATTR_POSITION: 25},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_position") == 25.0
async def test_set_tilt_position(hass, calls):
    """Test the set_tilt_position command."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ 100 }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                            "set_cover_tilt_position": {"service": "test.automation"},
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # Setting a tilt position must invoke the scripted action once.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_COVER_TILT_POSITION,
        {ATTR_ENTITY_ID: ENTITY_COVER, ATTR_TILT_POSITION: 42},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_open_tilt_action(hass, calls):
    """Test the open_cover_tilt command."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ 100 }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                            "set_cover_tilt_position": {"service": "test.automation"},
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # open_cover_tilt is routed through the set_cover_tilt_position script.
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_close_tilt_action(hass, calls):
    """Test the close_cover_tilt command."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ 100 }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                            "set_cover_tilt_position": {"service": "test.automation"},
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # close_cover_tilt is routed through the set_cover_tilt_position script.
    await hass.services.async_call(
        DOMAIN, SERVICE_CLOSE_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_set_position_optimistic(hass, calls):
    """Test optimistic position mode."""
    # No position/value template: the cover assumes commands succeed and
    # updates its own state optimistically.
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "set_cover_position": {"service": "test.automation"}
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_position") is None
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: ENTITY_COVER, ATTR_POSITION: 42},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_position") == 42.0
    await hass.services.async_call(
        DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_CLOSED
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_OPEN
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_CLOSED
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state == STATE_OPEN
async def test_set_tilt_position_optimistic(hass, calls):
    """Test the optimistic tilt_position mode."""
    # No tilt_template: tilt commands update the reported tilt optimistically.
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "position_template": "{{ 100 }}",
                            "set_cover_position": {"service": "test.automation"},
                            "set_cover_tilt_position": {"service": "test.automation"},
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_tilt_position") is None
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_COVER_TILT_POSITION,
        {ATTR_ENTITY_ID: ENTITY_COVER, ATTR_TILT_POSITION: 42},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_tilt_position") == 42.0
    await hass.services.async_call(
        DOMAIN, SERVICE_CLOSE_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_tilt_position") == 0.0
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_tilt_position") == 100.0
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_tilt_position") == 0.0
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("current_tilt_position") == 100.0
async def test_icon_template(hass, calls):
    """Test icon template."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "value_template": "{{ states.cover.test_state.state }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                            "icon_template": "{% if states.cover.test_state.state %}"
                            "mdi:check"
                            "{% endif %}",
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # Before the source entity exists the template renders an empty icon.
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("icon") == ""
    state = hass.states.async_set("cover.test_state", STATE_OPEN)
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes["icon"] == "mdi:check"
async def test_entity_picture_template(hass, calls):
    """Test entity_picture template."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "value_template": "{{ states.cover.test_state.state }}",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                            "entity_picture_template": "{% if states.cover.test_state.state %}"
                            "/local/cover.png"
                            "{% endif %}",
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # Before the source entity exists the template renders an empty picture.
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("entity_picture") == ""
    state = hass.states.async_set("cover.test_state", STATE_OPEN)
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes["entity_picture"] == "/local/cover.png"
async def test_availability_template(hass, calls):
    """Test availability template."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "value_template": "open",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                            "availability_template": "{{ is_state('availability_state.state','on') }}",
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    # The cover tracks the availability_template: off -> unavailable, on -> available.
    hass.states.async_set("availability_state.state", STATE_OFF)
    await hass.async_block_till_done()
    assert hass.states.get("cover.test_template_cover").state == STATE_UNAVAILABLE
    hass.states.async_set("availability_state.state", STATE_ON)
    await hass.async_block_till_done()
    assert hass.states.get("cover.test_template_cover").state != STATE_UNAVAILABLE
async def test_availability_without_availability_template(hass, calls):
    """Test that the component is available when no availability_template is set."""
    assert await setup.async_setup_component(
        hass,
        "cover",
        {
            "cover": {
                "platform": "template",
                "covers": {
                    "test_template_cover": {
                        "value_template": "open",
                        "open_cover": {
                            "service": "cover.open_cover",
                            "entity_id": "cover.test_state",
                        },
                        "close_cover": {
                            "service": "cover.close_cover",
                            "entity_id": "cover.test_state",
                        },
                    }
                },
            }
        },
    )
    await hass.async_start()
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.state != STATE_UNAVAILABLE
async def test_invalid_availability_template_keeps_component_available(hass, caplog):
    """Test that an invalid availability template keeps the device available."""
    assert await setup.async_setup_component(
        hass,
        "cover",
        {
            "cover": {
                "platform": "template",
                "covers": {
                    "test_template_cover": {
                        "availability_template": "{{ x - 12 }}",
                        "value_template": "open",
                        "open_cover": {
                            "service": "cover.open_cover",
                            "entity_id": "cover.test_state",
                        },
                        "close_cover": {
                            "service": "cover.close_cover",
                            "entity_id": "cover.test_state",
                        },
                    }
                },
            }
        },
    )
    await hass.async_start()
    await hass.async_block_till_done()
    # BUG FIX: the original compared the State *object* returned by
    # hass.states.get() against the string STATE_UNAVAILABLE, which is
    # vacuously true and never tests anything; compare the .state value.
    assert hass.states.get("cover.test_template_cover").state != STATE_UNAVAILABLE
    # The template error must still be surfaced in the log.
    assert ("UndefinedError: 'x' is undefined") in caplog.text
async def test_device_class(hass, calls):
    """Test that a valid device_class is exposed on the entity."""
    with assert_setup_component(1, "cover"):
        assert await setup.async_setup_component(
            hass,
            "cover",
            {
                "cover": {
                    "platform": "template",
                    "covers": {
                        "test_template_cover": {
                            "value_template": "{{ states.cover.test_state.state }}",
                            "device_class": "door",
                            "open_cover": {
                                "service": "cover.open_cover",
                                "entity_id": "cover.test_state",
                            },
                            "close_cover": {
                                "service": "cover.close_cover",
                                "entity_id": "cover.test_state",
                            },
                        }
                    },
                }
            },
        )
    await hass.async_start()
    await hass.async_block_till_done()
    state = hass.states.get("cover.test_template_cover")
    assert state.attributes.get("device_class") == "door"
async def test_invalid_device_class(hass, calls):
    """Test that an invalid device_class fails validation (no entity created)."""
    config = {
        "cover": {
            "platform": "template",
            "covers": {
                "test_template_cover": {
                    "value_template": "{{ states.cover.test_state.state }}",
                    "device_class": "barnacle_bill",
                    "open_cover": {
                        "service": "cover.open_cover",
                        "entity_id": "cover.test_state",
                    },
                    "close_cover": {
                        "service": "cover.close_cover",
                        "entity_id": "cover.test_state",
                    },
                }
            },
        }
    }
    # Expect zero successfully set up cover platforms.
    with assert_setup_component(0, "cover"):
        assert await setup.async_setup_component(hass, "cover", config)

    await hass.async_start()
    await hass.async_block_till_done()

    # Config validation failed, so the entity must not exist.
    assert not hass.states.get("cover.test_template_cover")
| {
"content_hash": "012fb5e4a5776cfc99d0adbdcd41952e",
"timestamp": "",
"source": "github",
"line_count": 1017,
"max_line_length": 103,
"avg_line_length": 34.64896755162242,
"alnum_prop": 0.45782961575571823,
"repo_name": "postlund/home-assistant",
"id": "9980691085bd8aab98e72b127e30aecc1d01fa09",
"size": "35238",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/template/test_cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
# Make the repository root importable so "code.common.*" resolves regardless
# of the directory the script is launched from.
import os, sys
sys.path.insert(0, os.getcwd())
import json
import platform
import subprocess
# NOTE(review): duplicate of the earlier "import os, sys"; harmless but removable.
import sys
from glob import glob
# Harness version tag.
VERSION = "v0.7"
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s %(filename)s:%(lineno)d %(levelname)s] %(message)s")
from code.common.system_list import system_list
def is_xavier():
    """Return True when the host CPU architecture is aarch64 (Jetson Xavier)."""
    arch = platform.processor()
    return arch == "aarch64"
def check_mig_enabled(gpuid):
    """Return True when nvidia-smi reports MIG instances for GPU *gpuid*."""
    cmd = "nvidia-smi mig -lgi -i {gpu}".format(gpu=gpuid)
    proc = subprocess.Popen(cmd, universal_newlines=True, shell=True, stdout=subprocess.PIPE)
    # nvidia-smi prints this marker line when the GPU has no MIG instances.
    return all("No MIG-enabled devices found" not in line for line in proc.stdout)
def get_mig_compute_instances(gpuid):
    """Return UUID strings parsed from ``nvidia-smi -L`` output.

    NOTE(review): *gpuid* is never used -- the command lists devices across
    all GPUs; confirm whether per-GPU filtering was intended.
    """
    p = subprocess.Popen("nvidia-smi -L", universal_newlines=True, shell=True, stdout=subprocess.PIPE)
    instances = []
    for line in p.stdout:
        toks = line.split()
        # Presumably matches MIG device lines (6 tokens whose second token is
        # the profile name rather than the literal "MIG") -- TODO confirm
        # against actual "nvidia-smi -L" output.
        if len(toks) == 6 and toks[1] != "MIG":
            # Strip the trailing ')' from the UUID token.
            instances.append(toks[5].replace(')',''))
    return instances
def get_system_id():
    """Return the canonical system id string for the machine we run on.

    Detection order:
      1. aarch64 -> Jetson AGX Xavier / Xavier NX (via the device-tree model).
      2. MIG enabled on GPU 0 -> only the 7x 1g.5gb layout is supported.
      3. Otherwise match GPU name and count against ``system_list``.

    Returns:
        str: the system id of the best-matching entry in system_list.

    Raises:
        RuntimeError: for unrecognized aarch64 devices, unsupported MIG
            layouts, missing GPUs, or when no config matches the GPU name.
    """
    if is_xavier():
        # The only officially supported aarch64 platforms are Jetson Xavier boards.
        with open("/sys/firmware/devicetree/base/model") as product_f:
            product_name = product_f.read()
        if "jetson" in product_name.lower():
            if "AGX" in product_name:
                return "AGX_Xavier"
            elif "NX" in product_name:
                return "Xavier_NX"
            else:
                raise RuntimeError("Unrecognized aarch64 device. Only AGX Xavier and Xavier NX are supported.")
    if check_mig_enabled(0):
        compute_instances = get_mig_compute_instances(0)
        number_of_instances = len(compute_instances)
        logging.info("Found {:} compute instances".format(number_of_instances))
        if number_of_instances == 7:  # There is only a single configuration that can provide 7 instances
            return "A100-SXM4x1-MIG_1x1g.5gb"
        elif number_of_instances in (1, 2, 3):
            # Bug fix: the original compared the *list* returned by
            # get_mig_compute_instances(0) to an int (always False), so this
            # branch was unreachable and nvidia-smi was re-run three times.
            raise RuntimeError("Repo only supports 1x1g.5gb configuration")
        else:
            raise RuntimeError("Unknown MIG configuration.")
    try:
        import pycuda.driver
        import pycuda.autoinit
        name = pycuda.driver.Device(0).name()
        count_actual = pycuda.driver.Device.count()
    except Exception:
        # pycuda unavailable or failed; fall back to parsing nvidia-smi.
        nvidia_smi_out = run_command("nvidia-smi -L", get_output=True, tee=False)
        # Strip empty lines
        tmp = [line for line in nvidia_smi_out if len(line) > 0]
        count_actual = len(tmp)
        if count_actual == 0:
            raise RuntimeError("nvidia-smi did not detect any GPUs:\n{:}".format(nvidia_smi_out))
        # Format: GPU #: <name> (UUID: <uuid>)
        name = tmp[0].split("(")[0].split(": ")[1].strip()
    system_id, matched, closest = ("", "", -1000)
    for system in system_list:
        if system[1] not in name:
            continue
        # Match exact name with higher priority than partial name
        if matched == name and system[1] != name:
            continue
        closer = abs(count_actual - system[2]) < abs(count_actual - closest)
        if closer or (matched != name and system[1] == name):
            system_id, matched, closest = system
    if closest == -1000:
        raise RuntimeError("Cannot find valid configs for {:d}x {:}. Please pass in config path using --configs=<PATH>.".format(count_actual, name))
    elif closest != count_actual:
        # logging.warn is deprecated; use logging.warning.
        logging.warning("Cannot find valid configs for {:d}x {:}. Using {:d}x {:} configs instead.".format(count_actual, name, closest, name))
    return system_id
class BENCHMARKS:
    """Canonical benchmark names and an alias table for common spellings."""

    # Official names for benchmarks
    ResNet50 = "resnet50"
    SSDResNet34 = "ssd-resnet34"
    SSDMobileNet = "ssd-mobilenet"
    RNNT = "rnnt"
    DLRM = "dlrm"
    BERT = "bert"
    UNET = "3d-unet"

    ALL = [ResNet50, SSDResNet34, SSDMobileNet, BERT, DLRM, UNET, RNNT]
    # Benchmarks that have a separate high-accuracy target.
    HIGH_ACC_ENABLED = { BERT, DLRM, UNET }

    # Whatever we might call it
    alias_map = {
        "resnet": ResNet50,
        "Resnet": ResNet50,
        "ResNet": ResNet50,
        "resnet50": ResNet50,
        "Resnet50": ResNet50,
        "ResNet50": ResNet50,
        "SSDResNet34": SSDResNet34,
        "SSD-ResNet34": SSDResNet34,
        "ssd-resnet34": SSDResNet34,
        "ssd-large": SSDResNet34,
        "SSDMobileNet": SSDMobileNet,
        "SSD-MobileNet": SSDMobileNet,
        "ssd-mobilenet": SSDMobileNet,
        "ssd-small": SSDMobileNet,
        "RNNT": RNNT,
        "RNN-T": RNNT,
        "rnnt": RNNT,
        "rnn-t": RNNT,
        "DLRM": DLRM,
        "dlrm": DLRM,
        "BERT": BERT,
        "bert": BERT,
        "UNET": UNET,
        "Unet": UNET,
        "unet": UNET,
        "3d-unet": UNET,
        "3DUnet": UNET,
        "3D-Unet": UNET
    }

    # Fix: declared as a staticmethod so it can also be called through an
    # instance or passed around as a plain function.
    @staticmethod
    def alias(name):
        """Return the official benchmark name for *name*.

        Raises:
            ValueError: if *name* is not a known alias.
        """
        if name not in BENCHMARKS.alias_map:
            raise ValueError("Unknown benchmark: {:}".format(name))
        return BENCHMARKS.alias_map[name]
class SCENARIOS:
    """Canonical MLPerf scenario names and an alias table for common spellings."""

    # Official names for scenarios
    SingleStream = "SingleStream"
    MultiStream = "MultiStream"
    Offline = "Offline"
    Server = "Server"

    ALL = [SingleStream, MultiStream, Offline, Server]

    # Whatever we might call it
    alias_map = {
        "SingleStream": SingleStream,
        "Singlestream": SingleStream,
        "singlestream": SingleStream,
        "single_stream": SingleStream,
        "single-stream": SingleStream,
        "Single-Stream": SingleStream,
        "MultiStream": MultiStream,
        "Multistream": MultiStream,
        "multistream": MultiStream,
        "multi_stream": MultiStream,
        "multi-stream": MultiStream,
        "Multi-Stream": MultiStream,
        "Offline": Offline,
        "offline": Offline,
        "Server": Server,
        "server": Server
    }

    # Fix: declared as a staticmethod (consistent with BENCHMARKS.alias) so it
    # can be called through an instance or passed around as a plain function.
    @staticmethod
    def alias(name):
        """Return the official scenario name for *name*.

        Raises:
            ValueError: if *name* is not a known alias.
        """
        if name not in SCENARIOS.alias_map:
            raise ValueError("Unknown scenario: {:}".format(name))
        return SCENARIOS.alias_map[name]
def run_command(cmd, get_output=False, tee=True, custom_env=None):
    """Run *cmd* through the shell.

    Args:
        cmd (str): The command line to execute.
        get_output (bool): When True, capture stdout and return it as a list
            of lines (without trailing newlines). When False, just run the
            command and return its (zero) exit status.
        tee (bool): When capturing, also echo each line to stdout.
        custom_env (dict): Optional environment to run the command with,
            replacing the inherited one.

    Returns:
        list of str when get_output is True, otherwise the return code.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    logging.info("Running command: {:}".format(cmd))
    if not get_output:
        return subprocess.check_call(cmd, shell=True)

    popen_kwargs = {"stdout": subprocess.PIPE, "shell": True}
    if custom_env is not None:
        logging.info("Overriding Environment")
        popen_kwargs["env"] = custom_env
    proc = subprocess.Popen(cmd, **popen_kwargs)

    captured = []
    for raw_line in iter(proc.stdout.readline, b""):
        text = raw_line.decode("utf-8")
        if tee:
            sys.stdout.write(text)
            sys.stdout.flush()
        captured.append(text.rstrip("\n"))

    status = proc.wait()
    if status != 0:
        raise subprocess.CalledProcessError(status, cmd)
    return captured
def args_to_string(d, blacklist=(), delimit=True, double_delimit=False):
    """Serialize dict *d* into a '--key=value' command-line flag string.

    Args:
        d (dict): Flag names mapped to values. None values are skipped;
            booleans become 'true'/'false'.
        blacklist: Flag names to omit. (Bug fix: the default was a mutable
            list; an immutable tuple avoids the shared-default pitfall.)
        delimit (bool): Quote non-numeric values with double quotes.
        double_delimit (bool): Use escaped quotes (\\") instead, for flags
            that pass through an extra shell layer.

    Returns:
        str: space-joined flags.
    """
    flags = []
    for flag, value in d.items():
        # Skip unset
        if value is None:
            continue
        # Skip blacklisted
        if flag in blacklist:
            continue
        if isinstance(value, bool):
            flags.append("--{:}={:}".format(flag, "true" if value else "false"))
        elif isinstance(value, (int, float)) or not delimit:
            flags.append("--{:}={:}".format(flag, value))
        elif double_delimit:
            flags.append("--{:}=\\\"{:}\\\"".format(flag, value))
        else:
            flags.append("--{:}=\"{:}\"".format(flag, value))
    return " ".join(flags)
def flags_bool_to_int(d):
    """Convert boolean values in *d* to 1/0 in place and return *d*."""
    for key, value in d.items():
        if isinstance(value, bool):
            d[key] = 1 if value else 0
    return d
def dict_get(d, key, default=None):
    """Like dict.get, but also fall back to *default* when the stored value is None."""
    value = d.get(key, default)
    if value is None:
        return default
    return value
def find_config_files(benchmarks, scenarios):
    """Return a comma-joined string of existing config.json paths for every
    benchmark/scenario combination."""
    candidates = [
        "configs/{:}/{:}/config.json".format(benchmark, scenario)
        for scenario in scenarios
        for benchmark in benchmarks
    ]
    # Only keep files that actually exist on disk.
    return ",".join(path for path in candidates if os.path.exists(path))
def load_configs(config_files):
    """Parse every JSON config matched by the comma-separated glob patterns.

    Raises:
        ValueError: when a pattern matches no files at all.
    """
    parsed = []
    for pattern in config_files.split(","):
        matches = glob(pattern)
        if not matches:
            raise ValueError("Config file {:} cannot be found.".format(pattern))
        for path in matches:
            logging.info("Parsing config file {:} ...".format(path))
            with open(path) as f:
                parsed.append(json.load(f))
    return parsed
| {
"content_hash": "77e4de17b26781ff537c95c9573fc500",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 148,
"avg_line_length": 34.11439114391144,
"alnum_prop": 0.5846403461330449,
"repo_name": "mlperf/inference_results_v0.7",
"id": "166e0ce955818c1295355cf3c06469ed3cbd3e20",
"size": "9856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "closed/Gigabyte/code/common/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148628"
},
{
"name": "C++",
"bytes": "14551146"
},
{
"name": "CMake",
"bytes": "380597"
},
{
"name": "Cuda",
"bytes": "3604332"
},
{
"name": "Dockerfile",
"bytes": "32985"
},
{
"name": "Makefile",
"bytes": "103953"
},
{
"name": "Objective-C",
"bytes": "5470"
},
{
"name": "Python",
"bytes": "11627827"
},
{
"name": "Roff",
"bytes": "153"
},
{
"name": "Shell",
"bytes": "349257"
}
],
"symlink_target": ""
} |
import cherrypy
import sys
import mysql.connector
from collections import OrderedDict
# Database connection settings for the feedND schema.
DATABASE_USER = 'root'
DATABASE_HOST = '127.0.0.1'
DATABASE_NAME = 'feedND'
# Create a single module-level MySQL connection shared by all requests.
# NOTE(review): ExampleApp.index() closes this connection, so any later
# request reuses a closed handle -- confirm whether per-request connections
# were intended.
# NOTE(review): no password is supplied; this works only if root has none.
cnx = mysql.connector.connect(user=DATABASE_USER, host=DATABASE_HOST, database=DATABASE_NAME)
cursor = cnx.cursor()
class ExampleApp(object):
    """CherryPy application for FeedND (written in Python 2 syntax)."""
    @cherrypy.expose
    def index(self):
        """Render the restaurant listing page from the feedND database."""
        #d = {'Subway':427.0, "O'Rourke's Public House":632.0, 'The Mark Dine & Tap':730.0}
        #OrderedDict(sorted(d.items(), key=lambda t: t[1]))
        result = """
        <!DOCTYPE html>
        <html>
        <head>
        <title>FeedND</title>
        <style>
        ul {
            list-style-type: none;
            margin: 0;
            padding: 0;
            overflow: hidden;
        }
        li {
            float: left;
        }
        a {
            display: block;
            width: 120px;
            background-color: #dddddd;
            font-size: 120%;
        }
        th, td {
            padding: 5px;
        }
        th {
            text-align: left;
        }
        </style>
        </head>
        <body>
        <h1>FeedND</h1>
        <ul>
           <li><a href="">Orders</a></li>
           <li><a href="">Restaurants</a></li>
           <li><a href="">Account</a></li>
        </ul>
        <p></p>
        <table>
        <tr>
            <th>Location</th>
            <th>Address</th>
        </tr>
        """
        #for item in reversed(d.items()):
        #    result += "<tr><td>"+item[0]+"</td><td>"+str(item[1])+"</tr>"
        # Append one table row per restaurant using the module-level cursor.
        cursor.execute('select name, address, state from restaurants')
        row = cursor.fetchone()
        while (cursor is not None) and (row is not None):
            result += "<tr><td>"+row[0]+"</td><td>"+row[1]+", "+row[2]+"</tr>"
            row = cursor.fetchone()
        result += "</table></body></html>"
        # NOTE(review): this closes the *module-level* connection, so every
        # request after the first one operates on a closed connection.
        cnx.close()
        return result
    @cherrypy.expose
    def showdb(self):
        """Render invitation rows from the separate testdb database."""
        cnx = mysql.connector.connect(user='test', password='mypass',
                                      host='127.0.0.1',
                                      database='testdb')
        cursor = cnx.cursor()
        query = ("SELECT firstname,lastname,email FROM Invitations")
        cursor.execute(query)
        info = str()
        # Python 2 print statement; this module cannot run under Python 3.
        print cursor
        for (firstname, lastname, email) in cursor:
            info = info + "Full Name:" + lastname + firstname + "Email: "+email
        return info
# WSGI entry point consumed by the CherryPy/Apache deployment.
application = cherrypy.Application(ExampleApp(), None)
| {
"content_hash": "a5d23c478f846dd1a44a738dedf0cec3",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 93,
"avg_line_length": 24.696629213483146,
"alnum_prop": 0.586442220200182,
"repo_name": "zyz29/yzhou9-webapps",
"id": "760213ce9bf57514cc7f63e97e35d4ca368bfe7c",
"size": "2198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hw2/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "71396"
},
{
"name": "CSS",
"bytes": "287532"
},
{
"name": "HTML",
"bytes": "28031"
},
{
"name": "JavaScript",
"bytes": "23861"
},
{
"name": "Python",
"bytes": "105203"
}
],
"symlink_target": ""
} |
from ._visual import (Text, Line, Triangle, Rectangle, Circle, RawImage,
Diamond, ConcentricCircles, FixationDot, _convert_color,
_Triangular)
| {
"content_hash": "8f3b35fb23876440c3d9bb942005232e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 78,
"avg_line_length": 62.333333333333336,
"alnum_prop": 0.5989304812834224,
"repo_name": "lkishline/expyfun",
"id": "72f528a4af02ebce178d8eaff3c4d528f5254f7a",
"size": "187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expyfun/visual/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1018"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "390894"
}
],
"symlink_target": ""
} |
def method():
    """Return 1; the else branch is excluded from coverage via nocover markers."""
    if True:
        return 1
    #begin nocover
    else:
        return 2
    #end nocover
| {
"content_hash": "6fa8d903de27faba5758c59271e96090",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 18,
"avg_line_length": 15.285714285714286,
"alnum_prop": 0.514018691588785,
"repo_name": "cortesi/pry",
"id": "10ac026720c64e2fbd9c9fc4f422e73e337ab12f",
"size": "107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/project/module/two.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104838"
},
{
"name": "Shell",
"bytes": "321"
}
],
"symlink_target": ""
} |
from ipaddress import IPv4Interface as IPv4Int
from nose.tools import assert_equals, assert_raises
from nettool.nettest import NetTest as nu
class TestConversion(object):
    """Tests for nettool's netmask/wildcard/prefix and string conversions.

    setup() precomputes the 33 dotted-quad netmasks and wildcards for prefix
    lengths 0..32; the conversion tests check every direction of the
    netmask <-> wildcard <-> prefix mapping plus invalid-input rejection,
    and the string tests check CIDR/address/hostname normalization.
    """
    def setup(self):
        # Index i holds the netmask/wildcard for a /i prefix.
        self.netmasks = list()
        self.wildcards = list()
        for x in range(0, 33):
            netmask = IPv4Int(u'255.255.255.255/{0}'.format(x)).network.network_address.exploded
            self.netmasks.append(netmask)
            wildcard = IPv4Int(u'0.0.0.0/{0}'.format(x)).network.hostmask.exploded
            self.wildcards.append(wildcard)
    def test_netmask_to_wildcard_conversion(self):
        for index, netmask in enumerate(self.netmasks):
            assert_equals(nu.convert.netmask.wildcard(netmask), self.wildcards[index])
    def test_netmask_to_prefix_conversion(self):
        for index, netmask in enumerate(self.netmasks):
            assert_equals(nu.convert.netmask.prefix(netmask), index)
    def test_wildcard_to_netmask_conversion(self):
        for index, wildcard in enumerate(self.wildcards):
            assert_equals(nu.convert.wildcard.netmask(wildcard), self.netmasks[index])
    def test_wildcard_to_prefix_conversion(self):
        for index, wildcard in enumerate(self.wildcards):
            assert_equals(nu.convert.wildcard.prefix(wildcard), index)
    def test_prefix_to_netmask_conversion(self):
        for prefix in range(0, 33):
            assert_equals(nu.convert.prefix.netmask(prefix), self.netmasks[prefix])
    def test_prefix_to_wildcard_conversion(self):
        for prefix in range(0, 33):
            assert_equals(nu.convert.prefix.wildcard(prefix), self.wildcards[prefix])
    # Invalid-input cases: each converter must raise ValueError.
    def test_netmask_to_wildcard_conversion_invalid_input(self):
        assert_raises(ValueError, nu.convert.netmask.wildcard, 1)
    def test_netmask_to_prefix_conversion_invalid_input(self):
        assert_raises(ValueError, nu.convert.netmask.prefix, 1)
    def test_wildcard_to_netmask_conversion_invalid_input(self):
        assert_raises(ValueError, nu.convert.wildcard.netmask, 1)
    def test_wildcard_to_prefix_conversion_invalid_input(self):
        assert_raises(ValueError, nu.convert.wildcard.prefix, 1)
    def test_prefix_to_netmask_conversion_invalid_input(self):
        assert_raises(ValueError, nu.convert.prefix.netmask, 'invalid')
    def test_prefix_to_wildcard_conversion_invalid_input(self):
        assert_raises(ValueError, nu.convert.prefix.wildcard, 'invalid')
    def test_convert_string_to_cidr(self):
        assert nu.convert.string.cidr('1.2.3.1/24') == '1.2.3.0/24'
        assert nu.convert.string.cidr('1.2.3.0 255.255.255.0') == '1.2.3.0/24'
        assert nu.convert.string.cidr('1.2.3.0 0.0.0.255') == '1.2.3.0/24'
        assert nu.convert.string.cidr('1.2.3.0 255.255.255.255') == '1.2.3.0/32'
    def test_convert_string_to_network_address(self):
        assert nu.convert.string.network_address('1.2.3.1/24') == '1.2.3.0'
        assert nu.convert.string.network_address('1.2.3.0 255.255.255.0') == '1.2.3.0'
        assert nu.convert.string.network_address('1.2.3.0 255.255.255.255') == '1.2.3.0'
        assert nu.convert.string.network_address('1.2.3.0 0.0.0.255') == '1.2.3.0'
    def test_convert_string_to_broadcast(self):
        assert nu.convert.string.broadcast_address('1.2.3.1/24') == '1.2.3.255'
        assert nu.convert.string.broadcast_address('1.2.3.0 255.255.255.0') == '1.2.3.255'
        assert nu.convert.string.broadcast_address('1.2.3.0 255.255.255.255') == '1.2.3.0'
        assert nu.convert.string.broadcast_address('1.2.3.0 0.0.0.255') == '1.2.3.255'
    def test_convert_string_longest_match(self):
        assert nu.convert.string.network_longest_match('1.2.3.1/24') == '1.2.3.'
        assert nu.convert.string.network_longest_match('1.2.3.1/8') == '1.'
        assert nu.convert.string.network_longest_match('1.2.3.0 255.255.255.0') == '1.2.3.'
        assert nu.convert.string.network_longest_match('1.2.3.0 255.255.255.255') == '1.2.3.0'
        assert nu.convert.string.network_longest_match('1.2.3.0 0.0.0.255') == '1.2.3.'
    def test_convert_string_to_ip(self):
        assert nu.convert.string.ip('1.2.3.1/24') == '1.2.3.1'
        assert nu.convert.string.ip('1.2.3.0 255.255.255.0') == '1.2.3.0'
        assert nu.convert.string.ip('1.2.3.0 255.255.255.255') == '1.2.3.0'
        assert nu.convert.string.ip('1.2.3.0 0.0.0.255') == '1.2.3.0'
    def test_convert_string_to_hostname(self):
        assert_equals(nu.convert.string.hostname('host name.example.com'), 'host-name.example.com')
        assert_equals(nu.convert.string.hostname('(host)name.example.com'), 'host-name.example.com')
        assert_equals(nu.convert.string.hostname('HOSTNAME.EXAMPLE.COM'), 'hostname.example.com')
        assert_equals(nu.convert.string.hostname('-hostname.example.com.'), 'hostname.example.com')
        assert_equals(nu.convert.string.hostname('host_name.example.com'), 'host-name.example.com')
        assert_equals(nu.convert.string.hostname(' hostname.example.com '), 'hostname.example.com')
        hostname = nu.convert.string.hostname(' hostname(a)-1.example.com ')
        assert_equals(hostname, 'hostname-a-1.example.com')
        assert_equals(nu.convert.string.hostname(u'ø.example.com'), 'o.example.com')
        assert_equals(nu.convert.string.hostname(u'å.example.com'), 'a.example.com')
        assert_equals(nu.convert.string.hostname('host/a.example.com'), 'host-a.example.com')
        assert_equals(nu.convert.string.hostname('host\\a.example.com'), 'host-a.example.com')
        assert_equals(nu.convert.string.hostname('host:a.example.com'), 'host-a.example.com')
    def test_convert_string_to_host(self):
        assert_equals(nu.convert.string.hostname('host name'), 'host-name')
        assert_equals(nu.convert.string.hostname('(host)name'), 'host-name')
        assert_equals(nu.convert.string.hostname('HOSTNAME'), 'hostname')
        assert_equals(nu.convert.string.hostname('-hostname.'), 'hostname')
        assert_equals(nu.convert.string.hostname('host_name'), 'host-name')
        assert_equals(nu.convert.string.hostname(' hostname '), 'hostname')
        assert_equals(nu.convert.string.hostname(u'ø'), 'o')
        assert_equals(nu.convert.string.hostname(u'å'), 'a')
| {
"content_hash": "978de9840ceae609beacee5a68007993",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 100,
"avg_line_length": 54.28695652173913,
"alnum_prop": 0.6650648726573762,
"repo_name": "heyglen/netobj",
"id": "0681f3f1b8bb06bd1058dad65ce2c71aded10079",
"size": "6272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/test_conversion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "140443"
}
],
"symlink_target": ""
} |
# Generate and apply the next SQLAlchemy-Migrate migration by diffing the
# current database schema against the models in app.db.
# NOTE: this script uses Python 2 syntax (exec statement, print statement).
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
# Name of the next migration file, e.g. versions/003_migration.py.
migration=SQLALCHEMY_MIGRATE_REPO+'/versions/%03d_migration.py' % (api.db_version(SQLALCHEMY_DATABASE_URI,SQLALCHEMY_MIGRATE_REPO)+1)
# Load the last known model state into a scratch module so it can be diffed
# against the current db.metadata.
tmp_module=imp.new_module('old_model')
old_model=api.create_model(SQLALCHEMY_DATABASE_URI,SQLALCHEMY_MIGRATE_REPO)
exec old_model in tmp_module.__dict__
script=api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI,SQLALCHEMY_MIGRATE_REPO,tmp_module.meta,db.metadata)
# NOTE(review): the file handle from open() is never closed explicitly.
open(migration,'wt').write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI,SQLALCHEMY_MIGRATE_REPO)
print 'New migration saved as '+migration
print 'Current database version:'+str(api.db_version(SQLALCHEMY_DATABASE_URI,SQLALCHEMY_MIGRATE_REPO))
"content_hash": "2c21d35fd2556fdb924b5860773fd828",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 133,
"avg_line_length": 54,
"alnum_prop": 0.8,
"repo_name": "ljhandlwt/flask-study",
"id": "e237fb6f5a960decdf0759e5bc60375daa44c29b",
"size": "810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask1/db_migrate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4268"
},
{
"name": "HTML",
"bytes": "20552"
},
{
"name": "JavaScript",
"bytes": "3550"
},
{
"name": "Python",
"bytes": "17382"
}
],
"symlink_target": ""
} |
"""
Django settings for app project.
"""
DEBUG = True
ROOT_URLCONF = 'app'
MIDDLEWARE_CLASSES = []
SECRET_KEY = 'secret_key'
TIME_ZONE = 'UTC'
USE_TZ = True
| {
"content_hash": "14f6a805f4dca6e16b9ab53cd6b989ef",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 32,
"avg_line_length": 11.571428571428571,
"alnum_prop": 0.6481481481481481,
"repo_name": "vvv-v13/backend-tools",
"id": "c5f26513198b06918f13971f67d161b2989bb3bd",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/django/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Go",
"bytes": "4254"
},
{
"name": "JavaScript",
"bytes": "2656"
},
{
"name": "Python",
"bytes": "7038"
},
{
"name": "Shell",
"bytes": "229"
}
],
"symlink_target": ""
} |
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as testcommand
with open('test_requirements.txt') as test_reqs:
    # NOTE(review): the trailing comma wraps the readlines() list in a
    # one-element tuple -- confirm setup(tests_require=...) expects that.
    tests_require = test_reqs.readlines(),
class PyTest(testcommand):
    """setuptools 'test' command that delegates to pytest.

    Adds --coverage/--pep8/--flakes options, which are translated into the
    corresponding pytest plugin arguments in finalize_options().
    """
    user_options = testcommand.user_options[:]
    user_options += [
        ('coverage', 'C', 'Produce a coverage report for PyLmod'),
        ('pep8', 'P', 'Produce a pep8 report for PyLmod'),
        ('flakes', 'F', 'Produce a flakes report for PyLmod'),
    ]
    # Option defaults; distutils sets these from the command line.
    coverage = None
    pep8 = None
    flakes = None
    test_suite = False
    test_args = []
    def initialize_options(self):
        testcommand.initialize_options(self)
    def finalize_options(self):
        """Translate the custom options into pytest command-line arguments."""
        testcommand.finalize_options(self)
        self.test_suite = True
        self.test_args = []
        if self.coverage:
            self.test_args.append('--cov')
            self.test_args.append('pylmod')
        if self.pep8:
            self.test_args.append('--pep8')
        if self.flakes:
            self.test_args.append('--flakes')
    def run_tests(self):
        """Run pytest with the assembled arguments and exit with its status."""
        # import here, cause outside the eggs aren't loaded
        import pytest
        # Needed in order for pytest_cache to load properly
        # Alternate fix: import pytest_cache and pass to pytest.main
        import _pytest.config
        pm = _pytest.config.get_plugin_manager()
        pm.consider_setuptools_entrypoints()
        errno = pytest.main(self.test_args)
        sys.exit(errno)
# Bug fix: read README.rst inside a context manager so the file handle is
# closed deterministically (the original open().read() leaked it).
with open('README.rst') as readme_file:
    README = readme_file.read()

# Package metadata; PyTest above is wired in as the 'test' command.
setup(
    name='pylmod',
    version='0.1.0',
    license='BSD',
    author='MIT ODL Engineering',
    author_email='odl-engineering@mit.edu',
    url="http://github.com/mitodl/pylmod",
    description="PyLmod is a Python Implementation of MIT Learning Modules",
    long_description=README,
    packages=find_packages(),
    install_requires=["requests>=2.5.1", ],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Programming Language :: Python',
    ],
    test_suite="pylmod.tests",
    tests_require=tests_require,
    cmdclass={"test": PyTest},
    include_package_data=True,
    zip_safe=True,
)
| {
"content_hash": "a571b39fa2635b143b476834322f8b44",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 76,
"avg_line_length": 29.473684210526315,
"alnum_prop": 0.6245535714285714,
"repo_name": "amir-qayyum-khan/PyLmod",
"id": "49ad80219e56888361adee4c77a7e02058583c01",
"size": "2241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "107350"
}
],
"symlink_target": ""
} |
from decimal import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# (config-case name, denomination amount) pairs: every sigma denomination
# this test mints exactly once.
denoms = [
    ('denom_0.05', 0.05),
    ('denom_0.1', 0.1),
    ('denom_0.5', 0.5),
    ('denom_1', 1),
    ('denom_10', 10),
    ('denom_25', 25),
    ('denom_100', 100),
]
class ListSigmaMintValidationWithFundsTest(BitcoinTestFramework):
    """Checks listsigmamints: empty before minting, argument validation,
    and exactly one reported mint per denomination after minting each once."""
    def __init__(self):
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False
    def setup_nodes(self):
        # This test requires mocktime
        enable_mocktime()
        return start_nodes(self.num_nodes, self.options.tmpdir)
    def run_test(self):
        getcontext().prec = 6
        self.sync_all()
        # Before minting, both own-mint and all-mint listings must be empty.
        assert not self.nodes[0].listsigmamints(False), 'List sigma own mints should be empty.'
        assert not self.nodes[0].listsigmamints(True), 'List sigma all mints should be empty.'
        # Non-boolean arguments must be rejected by the RPC.
        assert_raises(JSONRPCException, self.nodes[0].listsigmamints, 'Some data')
        assert_raises(JSONRPCException, self.nodes[0].listsigmamints, 1)
        # Mint each denomination once and confirm with 10 blocks per mint.
        for input_data in denoms:
            case_name, denom = input_data
            self.nodes[0].mint(denom)
            self.nodes[0].generate(10)
            self.sync_all()
        listsigmamints = self.nodes[0].listsigmamints(False)
        # check that for 0 node all mints shown correct
        assert len(listsigmamints) == len(denoms), \
            'Amount of mints should be equal to expected for this node.' \
            'Expected: {}, Actual: {}.'.format(len(denoms), len(listsigmamints))
        # Reported denominations are scaled by 1e8 before comparing with the
        # configured amounts.
        sigmamints = [str(Decimal(mint['denomination'])/100000000) for mint in listsigmamints]
        exp_sigmamints = [str(denom[1]) for denom in denoms]
        assert sorted(exp_sigmamints) == sorted(sigmamints), \
            'Unexpected sigmamints shown in listsigmamints.'
# Allow running this functional test directly as a script.
if __name__ == '__main__':
    ListSigmaMintValidationWithFundsTest().main()
| {
"content_hash": "eb3fd79bacc9da30627c9614b1ecefbe",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 95,
"avg_line_length": 31.885245901639344,
"alnum_prop": 0.6231362467866324,
"repo_name": "zcoinofficial/zcoin",
"id": "bcf8321dae50c6337bedb230e021330f827b2cd6",
"size": "1968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/sigma_listsigmamints_validation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "977651"
},
{
"name": "C",
"bytes": "23449469"
},
{
"name": "C++",
"bytes": "11590916"
},
{
"name": "CMake",
"bytes": "96751"
},
{
"name": "CSS",
"bytes": "42324"
},
{
"name": "Dockerfile",
"bytes": "3182"
},
{
"name": "Gnuplot",
"bytes": "940"
},
{
"name": "HTML",
"bytes": "55527"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "Lua",
"bytes": "3321"
},
{
"name": "M4",
"bytes": "354106"
},
{
"name": "Makefile",
"bytes": "176315"
},
{
"name": "NASL",
"bytes": "177"
},
{
"name": "Objective-C++",
"bytes": "6795"
},
{
"name": "PHP",
"bytes": "4871"
},
{
"name": "POV-Ray SDL",
"bytes": "1480"
},
{
"name": "Perl",
"bytes": "18265"
},
{
"name": "Python",
"bytes": "1731667"
},
{
"name": "QMake",
"bytes": "1352"
},
{
"name": "Roff",
"bytes": "2388"
},
{
"name": "Ruby",
"bytes": "3216"
},
{
"name": "Rust",
"bytes": "119897"
},
{
"name": "Sage",
"bytes": "30192"
},
{
"name": "Shell",
"bytes": "314196"
},
{
"name": "SmPL",
"bytes": "5488"
},
{
"name": "SourcePawn",
"bytes": "12001"
},
{
"name": "q",
"bytes": "5584"
}
],
"symlink_target": ""
} |
"""Low-level http related exceptions."""
__all__ = ('HttpProcessingError',)
class HttpProcessingError(Exception):
    """HTTP error.

    Convenience base for raising HTTP errors carrying a status code, a
    message and optional response headers.

    :param int code: HTTP error code; falls back to the class-level default.
    :param str message: (optional) error message.
    :param list of [tuple] headers: (optional) headers to send in the response.
    """

    # Class-level defaults; subclasses override code/message.
    code = 0
    message = ''
    headers = None

    def __init__(self, *, code=None, message='', headers=None):
        # Only shadow the class-level code when one is explicitly given.
        if code is not None:
            self.code = code
        self.message = message
        self.headers = headers
        super().__init__(f"{self.code}, message='{message}'")
class BadHttpMessage(HttpProcessingError):
    """Malformed HTTP message; defaults to a 400 Bad Request."""
    code = 400
    message = 'Bad Request'
    def __init__(self, message, *, headers=None):
        super().__init__(message=message, headers=headers)
class HttpBadRequest(BadHttpMessage):
    """400 Bad Request; same code/message defaults as BadHttpMessage."""
    code = 400
    message = 'Bad Request'
class ContentEncodingError(BadHttpMessage):
    """Content encoding error (reported as a 400 Bad Request)."""
class TransferEncodingError(BadHttpMessage):
    """Transfer encoding error (reported as a 400 Bad Request)."""
class LineTooLong(BadHttpMessage):
    """Raised when an HTTP *line* exceeds the allowed byte *limit*."""

    def __init__(self, line, limit='Unknown'):
        detail = "Got more than {} bytes when reading {}.".format(limit, line)
        super().__init__(detail)
class InvalidHeader(BadHttpMessage):
    """Raised for a malformed HTTP header; bytes names are decoded first."""

    def __init__(self, hdr):
        # Decode raw header bytes without losing undecodable octets.
        name = hdr.decode('utf-8', 'surrogateescape') if isinstance(hdr, bytes) else hdr
        super().__init__(f'Invalid HTTP Header: {name}')
        self.hdr = name
class BadStatusLine(BadHttpMessage):
    """Raised for an unparsable HTTP status line.

    Stores the offending line (repr'd when falsy, so an empty line is shown
    as '') and sets args directly without invoking the parent constructor.
    """

    def __init__(self, line=''):
        shown = line or repr(line)
        self.args = (shown,)
        self.line = shown
class InvalidURLError(BadHttpMessage):
    """Invalid URL error; inherits BadHttpMessage's 400 defaults."""
    pass
| {
"content_hash": "75e0e4a51bf680af4ad070aaa2c4f750",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 22.987179487179485,
"alnum_prop": 0.6162855549358617,
"repo_name": "singulared/aiohttp",
"id": "7cb422a04115613a4d7405b7609987dcfde7fdf4",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiohttp/http_exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4890"
},
{
"name": "Makefile",
"bytes": "3293"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1209223"
},
{
"name": "Shell",
"bytes": "2309"
}
],
"symlink_target": ""
} |
"""Classes to read RINEX files."""
import math
import warnings
from collections import namedtuple, defaultdict
from datetime import timedelta
from .dtutils import validate_epoch, get_microsec
from .glo import fetch_slot_freq_num, FetchSlotFreqNumError
from .gnss import *
from .tec import Tec
class ObsFile(object):
    """Iterator over the records of a RINEX observation file.

    Subclasses (e.g. ObsFileV2) yield a Tec instance per record.

    Parameters
    ----------
    file : file-like object
        RINEX file opened for reading.
    version : float, optional
        RINEX format version; stored only when truthy.
    band_priority : tuple, optional
        Pairs of bands, e.g. ((1, 2), (1, 5)); TEC values are computed from
        the first pair whose observations are present.
    pr_obs_priority : dict, optional
        {system: (('P', 'P'), ('C', 'C'), ...)} pseudo-range observation
        preference per satellite system.
    glo_freq_nums : dict, optional
        { slot: { datetime.datetime: freq_number, ... }, ... } frequency
        numbers required to compute TEC for GLONASS observations.
    """

    def __init__(
            self,
            file,
            version=None,
            band_priority=BAND_PRIORITY,
            pr_obs_priority=None,
            glo_freq_nums=None,
    ):
        self.fh = file
        # NOTE(review): self.version stays unset when version is falsy;
        # confirm whether subclasses always provide a class-level default.
        if version:
            self.version = version
        self.band_priority = band_priority
        if pr_obs_priority is None:
            # TODO: add fallback to default
            self.pr_obs_priority = {
                system: (('P', 'P'), ('C', 'C'), ('C', 'P'))
                for system in (GPS, GLO, GAL, SBAS, BDS, QZSS, IRNSS)
            }
        else:
            self.pr_obs_priority = pr_obs_priority
        self.glo_freq_nums = {} if glo_freq_nums is None else glo_freq_nums
        self.obs_types = self.retrieve_obs_types()
        self.time_system = self.retrieve_time_system()
        self.skip_header()

    def skip_header(self):
        """Seek to the line right after the 'END OF HEADER' record.

        Raises
        ------
        ValueError
            If the header contains no 'END OF HEADER' label.
        """
        # Bug fix: the original wrapped this loop in try/except StopIteration,
        # but a for-loop never propagates StopIteration, so a truncated header
        # fell through silently. The error message was also the unformatted
        # literal "{program}: ..." (a missing .format()); use the same style
        # as retrieve_time_system instead.
        for row in self.fh:
            if self._get_header_label(row) == 'END OF HEADER':
                return
        raise ValueError("ObsFile: Couldn't find 'END OF HEADER'")

    def retrieve_obs_types(self):
        """Return the observation types; implemented by version subclasses."""
        pass

    def retrieve_time_system(self):
        """Return a time system value from the header."""
        time_system = None
        try:
            while time_system is None:
                row = next(self.fh)
                if self._get_header_label(row) == 'TIME OF FIRST OBS':
                    # Columns 49-51 hold the time system code.
                    time_system = row[48:51]
        except StopIteration:
            raise ValueError("ObsFile: Couldn't find 'TIME OF FIRST OBS'")
        # Rewind so subsequent parsing starts from the top of the file.
        self.fh.seek(0)
        return time_system

    def handle_event(self, epoch_flag, n_of_sats):
        """Skip the n_of_sats records that follow an epoch event.

        epoch_flag is accepted for interface symmetry but not inspected here.
        """
        while n_of_sats:
            next(self.fh)
            n_of_sats -= 1

    @staticmethod
    def _get_num_value(obs_set):
        """Return (value, LLI, signal strength) parsed from a triple of
        strings; all-blank fields become zero."""
        value, lli, sig_strength = obs_set
        value = float(value) if not value.isspace() else 0.0
        lli = int(lli) if not lli.isspace() else 0
        sig_strength = int(sig_strength) if not sig_strength.isspace() else 0
        return value, lli, sig_strength

    @staticmethod
    def _get_header_label(h_row):
        """Return the RINEX header label (columns 61+, upper-cased)."""
        return h_row[60:].rstrip().upper()
class ObsFileV2(ObsFile):
    """Create an object to iterate over the records of RINEX observation file
    v.2.n. The ObsFileV2 object yields Tec instance on each iteration.
    """
    def __init__(
            self,
            file,
            version=None,
            band_priority=BAND_PRIORITY,
            pr_obs_priority=None,
            glo_freq_nums=None,
    ):
        super(ObsFileV2, self).__init__(
            file,
            version=version,
            band_priority=band_priority,
            pr_obs_priority=pr_obs_priority,
            glo_freq_nums=glo_freq_nums,
        )
        # Unordered view of the header's observation types; the ordered
        # list remains in self.obs_types.
        self._obs_types = set(self.obs_types)
    @staticmethod
    def _rfill(line):
        """Return a copy of the line right filled with white spaces to make
        a line of length 80 chars."""
        line = line.rstrip()
        return line + ' ' * (80 - len(line))
    @staticmethod
    def _get_obs_indices(obs_types, band_priority, obs_priority):
        """Return indices from obs_types list according to band_priority
        and obs_priority.
        Parameters
        ----------
        obs_types : list
            List of observation types in a observation file. For example,
            ['L1', 'L2', 'L5', 'C1', 'C2', 'C5', 'P1', 'P2', 'S1', 'S2', 'S5'].
        band_priority : list of lists
            Pairs of the bands, e.g. [[1, 2], [1, 5], ...].
        obs_priority : list of lists
            Pairs of the observation types, e.g. [['P', 'P'], ['C', 'C'], ...]
        Returns
        -------
        indices : tuple of tuples
            Pairs of the indices according to obs_types and band_priority
            lists.
        """
        indices = []
        obs_band_pair = []
        # Combine every band pair with every observation-code pair,
        # in priority order (bands first).
        for band in band_priority:
            for obs in obs_priority:
                obs_band_pair.append(zip(band, obs))
        for pair in obs_band_pair:
            combination = []
            for band, obs in pair:
                combination.append('{}{}'.format(obs, band))
            try:
                indices.append(
                    tuple(obs_types.index(o) for o in combination)
                )
            except ValueError:
                # One of the codes is absent from the header; try the
                # next priority combination.
                continue
        if not indices:
            msg = "Can't find observable."
            raise ValueError(msg)
        return tuple(indices)
    def _parse_epoch_record(self):
        """Parse epoch record
        Returns
        -------
        timestamp : datetime
        epoch_flag : int
        n_of_sats : int
        list_of_sats : list
        """
        row = next(self.fh)
        err_msg = "Can't parse epoch record."
        epoch_flag, n_of_sats = None, None
        try:
            epoch_flag = int(row[26:29])
            n_of_sats = int(row[29:32])
        except ValueError:
            raise ValueError(err_msg)
        # Flags > 1 signal special events; the caller is expected to skip
        # the following event records (see handle_event).
        if epoch_flag > 1:
            return None, epoch_flag, n_of_sats, None
        timestamp = None
        try:
            sec = float(row[15:26])
            microsec = get_microsec(sec)
            timestamp = [int(row[i:i + 3]) for i in range(0, 13, 3)] + \
                [int(i) for i in (sec, microsec)]
        except ValueError:
            raise ValueError(err_msg)
        timestamp = validate_epoch(timestamp)
        list_of_sats = row[32:68].rstrip()
        # The satellite list continues on extra lines, 12 sats per line.
        rows_to_read = math.ceil(n_of_sats / 12.) - 1
        if rows_to_read > 0:
            while rows_to_read > 0:
                row = next(self.fh)
                list_of_sats += row[32:68].rstrip()
                rows_to_read -= 1
        # Each satellite identifier is exactly three characters wide.
        list_of_sats = [list_of_sats[i:i + 3] for i in
                        range(0, len(list_of_sats), 3)]
        msg = "Epoch's num of sats != actual num of sats"
        assert len(list_of_sats) == n_of_sats, msg
        return timestamp, epoch_flag, n_of_sats, list_of_sats
    def _split_observations_row(self, observations):
        """Return a list of observations."""
        # Each observation occupies 16 chars: 14 for the value, one for
        # the loss-of-lock indicator and one for the signal strength.
        observations = [observations[i:i + 16] for i in
                        range(0, len(observations), 16)]
        observations = [(o[:14], o[14], o[15]) for o in
                        observations[:len(self.obs_types)]]
        return observations
    def retrieve_obs_types(self):
        """Returns a list which contains types of observations
        from the header."""
        obs_types = None
        num_of_types = 0
        try:
            while not obs_types:
                row = next(self.fh)
                header_label = self._get_header_label(row)
                if header_label == '# / TYPES OF OBSERV':
                    num_of_types = int(row[:6].lstrip())
                    obs_types = row[6:60]
                    # More than 9 types spill onto continuation lines,
                    # 9 types per line.
                    if num_of_types > 9:
                        rows_to_read = math.ceil(num_of_types / 9) - 1
                        while rows_to_read > 0:
                            row = next(self.fh)
                            obs_types += row[6:60]
                            rows_to_read -= 1
                    obs_types = obs_types.split()
        except StopIteration:
            raise ValueError("tec: Can't find '# / TYPES OF OBSERV'; "
                             "unexpected end of the file.")
        except ValueError:
            raise ValueError("tec: Can't extract '# / TYPES OF OBSERV'")
        msg = "Some obs types are missing."
        assert num_of_types == len(obs_types), msg
        self.fh.seek(0)
        return obs_types
    def next_tec(self):
        """Yields Tec object."""
        while 1:
            try:
                (timestamp, epoch_flag,
                 n_of_sats, list_of_sats) = self._parse_epoch_record()
            except StopIteration:
                return
            if epoch_flag > 1:
                self.handle_event(epoch_flag, n_of_sats)
                continue
            # Per-system caches of the observation codes/indices chosen by
            # the priority rules; filled lazily on first encounter.
            phase_obs_code = dict()
            phase_obs_index = dict()
            pr_obs_code = dict()
            pr_obs_index = dict()
            for satellite in list_of_sats:
                # A blank system character means GPS in RINEX v2.
                if satellite[0] == ' ':
                    satellite = 'G{}'.format(satellite[1:])
                sat_sys = satellite[0].upper()
                observations_row = self._rfill(next(self.fh))
                # Observations continue on extra lines, 5 values per line.
                rows_to_read = math.ceil(len(self.obs_types) / 5.) - 1
                while rows_to_read > 0:
                    observations_row += self._rfill(next(self.fh))
                    rows_to_read -= 1
                observations = self._split_observations_row(observations_row)
                freq_num = None
                try:
                    if sat_sys == GLO:
                        slot = int(satellite[1:])
                        freq_num = fetch_slot_freq_num(
                            timestamp,
                            slot,
                            self.glo_freq_nums,
                        )
                except FetchSlotFreqNumError as err:
                    warnings.warn(str(err))
                    continue
                # TODO: wrap this up into a single function
                try:
                    if sat_sys not in phase_obs_index:
                        indices = self._get_obs_indices(
                            self.obs_types,
                            self.band_priority[sat_sys],
                            (('L', 'L'),),
                        )
                        phase_obs_index[sat_sys] = dict(
                            zip([1, 2], indices[0])
                        )
                    if sat_sys not in phase_obs_code:
                        phase_obs_code[sat_sys] = {
                            1: self.obs_types[phase_obs_index[sat_sys][1]],
                            2: self.obs_types[phase_obs_index[sat_sys][2]],
                        }
                    if sat_sys not in pr_obs_index:
                        indices = self._get_obs_indices(
                            self.obs_types,
                            self.band_priority[sat_sys],
                            self.pr_obs_priority[sat_sys],
                        )
                        pr_obs_index[sat_sys] = dict(
                            zip([1, 2], indices[0])
                        )
                    if sat_sys not in pr_obs_code:
                        pr_obs_code[sat_sys] = {
                            1: self.obs_types[pr_obs_index[sat_sys][1]],
                            2: self.obs_types[pr_obs_index[sat_sys][2]]
                        }
                except ValueError:
                    msg = ("Can't find observable to calculate TEC "
                           "using '{}' system.")
                    msg = msg.format(sat_sys)
                    warnings.warn(msg)
                    continue
                tec = Tec(
                    timestamp,
                    self.time_system,
                    satellite,
                    freq_num,
                )
                tec.phase_code = phase_obs_code[sat_sys]
                tec.p_range_code = pr_obs_code[sat_sys]
                sig_strength = {1: None, 2: None}
                for b in 1, 2:
                    obs = observations[phase_obs_index[sat_sys][b]]
                    obs = self._get_num_value(obs)
                    tec.phase[b] = obs[0]
                    tec.lli[b] = obs[1] & 1  # bit 0 only
                    sig_strength[b] = obs[2]
                tec.signal_strength = sig_strength[1]
                for b in 1, 2:
                    obs = observations[pr_obs_index[sat_sys][b]]
                    obs = self._get_num_value(obs)
                    tec.p_range[b] = obs[0]
                yield tec
    def __iter__(self):
        return self.next_tec()
class ObsFileV3(ObsFile):
    """Create an object to iterate over the records of RINEX observation
    file. Yields Tec object on each iteration."""
    # Observation-code letters for carrier phase and pseudo-range.
    phase_code = 'L'
    prange_code = 'C'
    # Frequency bands available per satellite system (RINEX v3 notation).
    bands = {
        GPS: (1, 2, 5),
        GLO: (1, 2, 3),
        GAL: (1, 5, 6, 7, 8),
        BDS: (2, 6, 7),
        SBAS: (1, 5),
        QZSS: (1, 2, 5, 6),
        IRNSS: (5, 9),
    }
    # Valid tracking-channel attributes per system and band.
    channels = {
        GPS: {
            1: 'CSLXPWYMN',
            2: 'CDSLXPWYMN',
            5: 'IQX',
        },
        GLO: {
            1: 'CP',
            2: 'CP',
            3: 'IQX',
        },
        GAL: {
            1: 'ABCXZ',
            5: 'IQX',
            6: 'ABCXZ',
            7: 'IQX',
            8: 'IQX',
        },
        BDS: {
            2: 'IQX',
            6: 'IQX',
            7: 'IQX',
        },
        SBAS: {
            1: 'C',
            5: 'IQX',
        },
        QZSS: {
            1: 'CSLXZ',
            2: 'SLX',
            5: 'IQX',
            6: 'SLX',
        },
        IRNSS: {
            5: 'ABCX',
            9: 'ABCX',
        }
    }
    def __init__(
            self,
            file,
            version=None,
            band_priority=BAND_PRIORITY,
            pr_obs_priority=None,
            glo_freq_nums=None,
    ):
        super(ObsFileV3, self).__init__(
            file,
            version=version,
            band_priority=band_priority,
            pr_obs_priority=pr_obs_priority,
            glo_freq_nums=glo_freq_nums,
        )
        self.obs_rec_indices = self._obs_slice_indices()
        self.phase_obs_codes = None
        self.prange_obs_codes = None
        self._generate_obs_codes()
        self._observation = namedtuple(
            'observation',
            ['code', 'value', 'lli', 'signal_strength']
        )
        self._observation_records = namedtuple(
            'observation_records',
            ['satellite', 'records']
        )
        # NOTE(review): attribute name carries a typo ('obsrevation');
        # kept as-is because it is used consistently below.
        self._obsrevation_indices = namedtuple(
            'observation_indices',
            ['phase', 'pseudo_range'],
        )
    def _generate_obs_codes(self):
        """Generate observation codes for the satellite systems."""
        self.phase_obs_codes = defaultdict(dict)
        self.prange_obs_codes = defaultdict(dict)
        v3_sat_systems = GPS, GLO, GAL, BDS, SBAS, QZSS, IRNSS
        obs_code_fmt = '{code}{band}{channel}'
        # Every (system, band, channel) combination yields one phase code
        # ('L<band><channel>') and one pseudo-range code ('C<band><channel>').
        for sat_system in v3_sat_systems:
            for band in ObsFileV3.bands[sat_system]:
                phase_obs_codes = list()
                prange_obs_codes = list()
                for channel in ObsFileV3.channels[sat_system][band]:
                    phase_obs_codes.append(
                        obs_code_fmt.format(
                            code=ObsFileV3.phase_code,
                            band=band,
                            channel=channel,
                        )
                    )
                    prange_obs_codes.append(
                        obs_code_fmt.format(
                            code=ObsFileV3.prange_code,
                            band=band,
                            channel=channel,
                        )
                    )
                self.phase_obs_codes[sat_system][band] = \
                    tuple(phase_obs_codes)
                self.prange_obs_codes[sat_system][band] = \
                    tuple(prange_obs_codes)
    def __iter__(self):
        return self.next_tec()
    def _obs_slice_indices(self):
        """Return indices to slice observation record into single observations.
        Returns
        -------
        indices : tuple
        """
        obs_amount = [len(self.obs_types[d]) for d in self.obs_types]
        max_obs_num = max(obs_amount)
        # Each observation field is 16 chars wide; the first 3 chars of
        # a record hold the satellite identifier.
        rec_len = 16
        start = 3
        stop = max_obs_num * rec_len
        indices = [[i, i + rec_len] for i in range(start, stop, rec_len)]
        return tuple(indices)
    @staticmethod
    def _is_epoch_record(row):
        # Epoch records in RINEX v3 start with '>'.
        return True if row[0] == '>' else False
    @staticmethod
    def _parse_epoch_record(row):
        """Parse epoch record"""
        # month, day, hour, min
        epoch = [row[i:i + 3] for i in range(6, 17, 3)]
        # year + ...
        epoch = [row[1:6]] + epoch
        sec = row[18:29]
        try:
            sec = float(sec)
            micro_sec = get_microsec(sec)
            epoch += [sec, micro_sec]
            epoch = list(map(int, epoch))
            epoch = validate_epoch(epoch)
        except ValueError:
            epoch = None
        epoch_flag = int(row[31])
        num_of_sat = int(row[32:35])
        try:
            sec = float(row[42:])
            micro_sec = get_microsec(sec)
            clock_offset = timedelta(0, int(sec), int(micro_sec))
        except ValueError:
            # No receiver clock offset present in the record.
            clock_offset = timedelta(0)
        return epoch, epoch_flag, num_of_sat, clock_offset
    def _parse_obs_record(self, row):
        """Parse observation record
        Parameters
        ----------
        row : str
        Returns
        -------
        sat : str
            satellite
        obs_values : tuple
            (obs_values_1, ..., obs_values_n)
            with obs_values_x = (obs_value, lli_value, sig_strength_value)
        """
        sat = row[0:3]
        sat = sat.replace(' ', '0')
        sat_system = sat[0]
        if sat_system not in self.obs_types:
            msg = ('There is no such satellite system definition '
                   'in the header: {ss}.')
            raise ValueError(msg.format(ss=sat_system))
        records = []
        obs_num = len(self.obs_types[sat_system])
        for n in range(obs_num):
            s, e = self.obs_rec_indices[n]
            chunk = row[s:e]
            code = self.obs_types[sat_system][n]
            if not chunk or chunk.isspace():
                # Missing observation: store zeros.
                records.append(self._observation(code, 0, 0, 0))
                continue
            val = chunk[:14]
            try:
                if not val or val.isspace():
                    val = 0.0
                else:
                    val = float(val)
            except ValueError:
                val = 0.0
            # LLI (col 14) and signal strength (col 15) may be absent or
            # blank at the end of the record.
            feature = []
            for i in 14, 15:
                try:
                    v = chunk[i]
                    if v.isspace():
                        v = 0
                    else:
                        v = int(v)
                except (IndexError, ValueError):
                    v = 0
                feature.append(v)
            obs = self._observation(
                code,
                val,
                feature[0],
                feature[1],
            )
            records.append(obs)
        return self._observation_records(sat, tuple(records))
    def retrieve_obs_types(self):
        """Return types of observations."""
        obs_types_rows = ''
        try:
            header_label = ''
            while header_label != 'END OF HEADER':
                row = next(self.fh)
                header_label = self._get_header_label(row)
                if header_label == 'SYS / # / OBS TYPES':
                    obs_types_rows += row
        except StopIteration:
            raise ValueError("tec: Can't find 'SYS / # / OBS TYPES'; "
                             "unexpected end of the file.")
        obs_types_records = []
        for line in obs_types_rows.split('\n'):
            if not line or line.isspace():
                continue
            # Continuation lines have a blank system/count field.
            if line[0:6].isspace():
                obs_types_records[-1] += line[7:60]
            else:
                obs_types_records.append(line[:60])
        del obs_types_rows
        obs_types = {}
        try:
            for record in obs_types_records:
                record = record.split()
                sat_sys = record[0]
                num_of_obs = int(record[1])
                sys_obs_types = tuple(record[2:])
                # misunderstanding with band #1 in Compass/BeiDou
                # see RINEX v3.n format for the details
                if self.version >= 3.02 and sat_sys == BDS:
                    corrected_obs_types = list(sys_obs_types)
                    for i, t in enumerate(corrected_obs_types):
                        if t[1] == '1':
                            t = t.replace('1', '2')
                            corrected_obs_types[i] = t
                    sys_obs_types = tuple(corrected_obs_types)
                warn_msg = (
                    'ObsFileV3: '
                    'Wrong number of observations {ot} (expected {n}).'
                )
                assert len(sys_obs_types) == num_of_obs, \
                    warn_msg.format(ot=len(sys_obs_types), n=num_of_obs)
                obs_types[sat_sys] = sys_obs_types
        except ValueError as err:
            print(err)
            raise err
        del obs_types_records
        self.fh.seek(0)
        return obs_types
    def indices_according_priority(self, sat_system):
        """Return obs_types indices according band priority."""
        def code(current_codes, all_codes):
            # Indices of the header obs types that belong to the given
            # band's valid codes (order not guaranteed: set intersection).
            union = set(current_codes) & set(all_codes)
            return [all_codes.index(c) for c in union]
        def indices(b_priority, ot_indices):
            for first_band, second_band in b_priority:
                if ot_indices[first_band] and ot_indices[second_band]:
                    return {
                        1: ot_indices[first_band][0],
                        2: ot_indices[second_band][0],
                    }
            msg = "Can't find any observations to calculate TEC."
            raise ValueError(msg)
        obs_types = self.obs_types[sat_system]
        bands = self.bands[sat_system]
        band_priority = self.band_priority[sat_system]
        phase_obs_codes = self.phase_obs_codes[sat_system]
        pr_obs_codes = self.prange_obs_codes[sat_system]
        # NOTE(review): zip truncates to three bands here, but the loop
        # below assigns an entry for every band anyway.
        phase_ot_indices = dict(
            zip(bands, (None,) * 3)
        )
        pr_ot_indices = dict(
            zip(bands, (None,) * 3)
        )
        for b in bands:
            phase_ot_indices[b] = code(phase_obs_codes[b], obs_types)
            pr_ot_indices[b] = code(pr_obs_codes[b], obs_types)
        phase_indices = indices(band_priority, phase_ot_indices)
        pr_indices = indices(band_priority, pr_ot_indices)
        return self._obsrevation_indices(phase_indices, pr_indices)
    def next_tec(self):
        """Yields Tec object."""
        obs_indices = {}
        while True:
            try:
                row = next(self.fh)
            except StopIteration:
                return
            if not self._is_epoch_record(row):
                msg = 'Unexpected format of the record: {row}'
                raise ValueError(msg.format(row=row))
            (timestamp,
             epoch_flag,
             num_of_sat,
             clock_offset) = self._parse_epoch_record(row)
            if epoch_flag > 1:
                self.handle_event(epoch_flag, num_of_sat)
                continue
            while num_of_sat:
                num_of_sat -= 1
                row = next(self.fh)
                observations = self._parse_obs_record(row)
                sat_sys = observations.satellite[0]
                freq_num = None
                if sat_sys == GLO:
                    try:
                        freq_num = fetch_slot_freq_num(
                            timestamp,
                            int(observations.satellite[1:]),
                            self.glo_freq_nums,
                        )
                    except FetchSlotFreqNumError as err:
                        warnings.warn(str(err))
                        continue
                tec = Tec(
                    timestamp,
                    self.time_system,
                    satellite=observations.satellite,
                    glo_freq_num=freq_num,
                )
                try:
                    if sat_sys not in obs_indices:
                        obs_indices[sat_sys] = (
                            self.indices_according_priority(sat_sys)
                        )
                except ValueError as err:
                    # TODO: add logger (info)
                    continue
                for b in 1, 2:
                    obs = observations.records[obs_indices[sat_sys].phase[b]]
                    tec.phase_code[b] = obs.code
                    tec.phase[b] = obs.value
                    tec.lli[b] = obs.lli & 1
                    obs = observations.records[
                        obs_indices[sat_sys].pseudo_range[b]]
                    tec.p_range_code[b] = obs.code
                    tec.p_range[b] = obs.value
                yield tec
| {
"content_hash": "437da68a0e90b28b32b36639b19980fa",
"timestamp": "",
"source": "github",
"line_count": 831,
"max_line_length": 79,
"avg_line_length": 31.492178098676295,
"alnum_prop": 0.4622850592281238,
"repo_name": "gnss-lab/gnss-tec",
"id": "5ec1da1f436da3bdb7ef7ae6c3553de903adfa02",
"size": "26201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gnss_tec/rinex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88860"
}
],
"symlink_target": ""
} |
from pytest import warns
from dash import dcc, dash_table
def filter_dir(package):
    """Return the sorted public attribute names of *package*.

    ``__version__`` is always kept; otherwise names starting with ``_``
    or ``@`` and a small deny-list of module internals are dropped.
    """
    ignore = {
        "warnings",
        "json",
        "async_resources",
        "package",
        "package_name",
        "f",
        "express",
    }
    names = []
    for attr in dir(package):
        if attr == "__version__":
            names.append(attr)
        elif attr[0] not in "@_" and attr not in ignore:
            names.append(attr)
    return sorted(names)
def test_old_dcc():
    """The legacy dash_core_components shim warns and mirrors dash.dcc."""
    with warns(UserWarning, match="dash_core_components package is deprecated"):
        import dash_core_components as legacy_dcc

    assert filter_dir(legacy_dcc) == filter_dir(dcc)
def test_old_table():
    """The legacy dash_table shim warns and mirrors dash.dash_table."""
    with warns(UserWarning, match="dash_table package is deprecated"):
        import dash_table as legacy_table

    assert filter_dir(legacy_table) == filter_dir(dash_table)
| {
"content_hash": "cb355bcc8af62cf54f47b252c0919988",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 84,
"avg_line_length": 21.904761904761905,
"alnum_prop": 0.55,
"repo_name": "plotly/dash",
"id": "08d7b14a23f3e92d47dc7fb2679a5002d715b029",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/unit/test_old_imports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17191"
},
{
"name": "HTML",
"bytes": "1729"
},
{
"name": "JavaScript",
"bytes": "638735"
},
{
"name": "Less",
"bytes": "22320"
},
{
"name": "Python",
"bytes": "1304969"
},
{
"name": "Shell",
"bytes": "224"
},
{
"name": "TypeScript",
"bytes": "840257"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals

from django.contrib import admin

# NOTE: the original used an implicit relative import ("from models import
# ..."), which only works on Python 2; the explicit relative import below
# works on both Python 2 and Python 3.
from .models import (
    Project,
    TestCase,
    TestCaseRun,
    TestSet,
    TestSetRun,
    TestStep,
)

# Register every model with the default admin site so each one gets the
# stock ModelAdmin interface.
for model in (Project, TestSet, TestCase, TestStep, TestSetRun, TestCaseRun):
    admin.site.register(model)
| {
"content_hash": "5c632ef888d2814b6d025d28cfcd1f8f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 80,
"avg_line_length": 30.818181818181817,
"alnum_prop": 0.8230088495575221,
"repo_name": "Shibashisdas/testmanager-backend",
"id": "f67f4747d256d532f520aedec13d2eeaea313fab",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15467"
}
],
"symlink_target": ""
} |
import os
import pathlib
import re
from collections.abc import Container, Iterable, Mapping, MutableMapping, Sized
from urllib.parse import unquote
import pytest
from yarl import URL
import aiohttp
from aiohttp import hdrs, web
from aiohttp.test_utils import make_mocked_request
from aiohttp.web import HTTPMethodNotAllowed, HTTPNotFound, Response
from aiohttp.web_urldispatcher import (
PATH_SEP,
AbstractResource,
Domain,
DynamicResource,
MaskDomain,
PlainResource,
ResourceRoute,
StaticResource,
SystemRoute,
View,
_default_expect_handler,
)
def make_handler():
    """Return a fresh no-op coroutine handler for route registration."""
    async def _handler(request):
        return Response(request)  # pragma: no cover

    return _handler
@pytest.fixture
def app():
    """Provide a fresh ``web.Application`` for each test."""
    application = web.Application()
    return application
@pytest.fixture
def router(app):
    """Expose the router of the ``app`` fixture."""
    dispatcher = app.router
    return dispatcher
@pytest.fixture
def fill_routes(router):
    """Return a callable that populates *router* with sample routes.

    Registers one plain route, one dynamic route and one static resource,
    returning every created route in a single list.
    """
    def go():
        plain = router.add_route('GET', '/plain', make_handler())
        dynamic = router.add_route('GET', '/variable/{name}',
                                   make_handler())
        static = router.add_static('/static',
                                   os.path.dirname(aiohttp.__file__))
        return [plain, dynamic, *static]

    return go
def test_register_uncommon_http_methods(router) -> None:
    """Routes can be registered for WebDAV/eventing HTTP methods.

    NOTE: the original set literal was missing a comma after 'UNLOCK',
    silently concatenating it with 'MOVE' into a single bogus
    'UNLOCKMOVE' entry, so neither method was actually exercised.
    """
    uncommon_http_methods = {
        'PROPFIND',
        'PROPPATCH',
        'COPY',
        'LOCK',
        'UNLOCK',
        'MOVE',
        'SUBSCRIBE',
        'UNSUBSCRIBE',
        'NOTIFY',
    }
    for method in uncommon_http_methods:
        router.add_route(method, '/handler/to/path', make_handler())
async def test_add_route_root(router) -> None:
    """A route registered at '/' resolves for GET '/'."""
    endpoint = make_handler()
    router.add_route('GET', '/', endpoint)
    request = make_mocked_request('GET', '/')
    match = await router.resolve(request)
    assert match is not None
    assert len(match) == 0
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_route_simple(router) -> None:
    """A plain multi-segment route resolves to its handler."""
    endpoint = make_handler()
    router.add_route('GET', '/handler/to/path', endpoint)
    request = make_mocked_request('GET', '/handler/to/path')
    match = await router.resolve(request)
    assert match is not None
    assert len(match) == 0
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_with_matchdict(router) -> None:
    """A dynamic segment is captured into the match dict."""
    endpoint = make_handler()
    router.add_route('GET', '/handler/{to}', endpoint)
    request = make_mocked_request('GET', '/handler/tail')
    match = await router.resolve(request)
    assert match is not None
    assert match == {'to': 'tail'}
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_with_matchdict_with_colon(router) -> None:
    """Colons inside a dynamic segment value are matched literally."""
    endpoint = make_handler()
    router.add_route('GET', '/handler/{to}', endpoint)
    request = make_mocked_request('GET', '/handler/1:2:3')
    match = await router.resolve(request)
    assert match is not None
    assert match == {'to': '1:2:3'}
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_route_with_add_get_shortcut(router) -> None:
    """add_get() registers a resolvable GET route."""
    endpoint = make_handler()
    router.add_get('/handler/to/path', endpoint)
    request = make_mocked_request('GET', '/handler/to/path')
    match = await router.resolve(request)
    assert match is not None
    assert len(match) == 0
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_route_with_add_post_shortcut(router) -> None:
    """add_post() registers a resolvable POST route."""
    endpoint = make_handler()
    router.add_post('/handler/to/path', endpoint)
    request = make_mocked_request('POST', '/handler/to/path')
    match = await router.resolve(request)
    assert match is not None
    assert len(match) == 0
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_route_with_add_put_shortcut(router) -> None:
    """add_put() registers a resolvable PUT route."""
    endpoint = make_handler()
    router.add_put('/handler/to/path', endpoint)
    request = make_mocked_request('PUT', '/handler/to/path')
    match = await router.resolve(request)
    assert match is not None
    assert len(match) == 0
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_route_with_add_patch_shortcut(router) -> None:
    """add_patch() registers a resolvable PATCH route."""
    endpoint = make_handler()
    router.add_patch('/handler/to/path', endpoint)
    request = make_mocked_request('PATCH', '/handler/to/path')
    match = await router.resolve(request)
    assert match is not None
    assert len(match) == 0
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_route_with_add_delete_shortcut(router) -> None:
    """add_delete() registers a resolvable DELETE route."""
    endpoint = make_handler()
    router.add_delete('/handler/to/path', endpoint)
    request = make_mocked_request('DELETE', '/handler/to/path')
    match = await router.resolve(request)
    assert match is not None
    assert len(match) == 0
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_route_with_add_head_shortcut(router) -> None:
    """add_head() registers a resolvable HEAD route."""
    endpoint = make_handler()
    router.add_head('/handler/to/path', endpoint)
    request = make_mocked_request('HEAD', '/handler/to/path')
    match = await router.resolve(request)
    assert match is not None
    assert len(match) == 0
    assert match.handler is endpoint
    assert match.route.name is None
async def test_add_with_name(router) -> None:
    """A route registered with a name exposes it on the match info."""
    endpoint = make_handler()
    router.add_route('GET', '/handler/to/path', endpoint,
                     name='name')
    request = make_mocked_request('GET', '/handler/to/path')
    match = await router.resolve(request)
    assert match is not None
    assert match.route.name == 'name'
async def test_add_with_tailing_slash(router) -> None:
    """Trailing slashes in a plain path are matched literally."""
    endpoint = make_handler()
    router.add_route('GET', '/handler/to/path/', endpoint)
    request = make_mocked_request('GET', '/handler/to/path/')
    match = await router.resolve(request)
    assert match is not None
    assert match == {}
    assert match.handler is endpoint
def test_add_invalid_path(router) -> None:
    """An unbalanced '{' in the path is rejected."""
    with pytest.raises(ValueError):
        router.add_route('GET', '/{/', make_handler())
def test_add_url_invalid1(router) -> None:
    """An unterminated variable pattern is rejected."""
    with pytest.raises(ValueError):
        router.add_route('post', '/post/{id', make_handler())
def test_add_url_invalid2(router) -> None:
    """Nested braces inside a variable pattern are rejected."""
    with pytest.raises(ValueError):
        router.add_route('post', '/post/{id{}}', make_handler())
def test_add_url_invalid3(router) -> None:
    """Unbalanced nested braces are rejected."""
    with pytest.raises(ValueError):
        router.add_route('post', '/post/{id{}', make_handler())
def test_add_url_invalid4(router) -> None:
    """A stray quote inside a variable pattern is rejected."""
    with pytest.raises(ValueError):
        router.add_route('post', '/post/{id"}', make_handler())
async def test_add_url_escaping(router) -> None:
    """Regex metacharacters in a plain path are escaped for matching."""
    endpoint = make_handler()
    router.add_route('GET', '/+$', endpoint)
    match = await router.resolve(make_mocked_request('GET', '/+$'))
    assert match is not None
    assert match.handler is endpoint
async def test_any_method(router) -> None:
    """A METH_ANY route matches every HTTP method."""
    registered = router.add_route(hdrs.METH_ANY, '/', make_handler())
    match_get = await router.resolve(make_mocked_request('GET', '/'))
    assert match_get is not None
    assert match_get.route is registered
    match_post = await router.resolve(make_mocked_request('POST', '/'))
    assert match_post is not None
    assert match_post.route is match_get.route
async def test_match_second_result_in_table(router) -> None:
    """Resolution is not limited to the first registered route."""
    first = make_handler()
    second = make_handler()
    router.add_route('GET', '/h1', first)
    router.add_route('POST', '/h2', second)
    match = await router.resolve(make_mocked_request('POST', '/h2'))
    assert match is not None
    assert match == {}
    assert match.handler is second
async def test_raise_method_not_allowed(router) -> None:
    """A known path with a wrong method yields a 405 system route."""
    router.add_route('GET', '/', make_handler())
    router.add_route('POST', '/', make_handler())
    request = make_mocked_request('PUT', '/')
    match = await router.resolve(request)
    assert isinstance(match.route, SystemRoute)
    assert match == {}
    with pytest.raises(HTTPMethodNotAllowed) as ctx:
        await match.handler(request)
    error = ctx.value
    assert error.method == 'PUT'
    assert error.status == 405
    assert error.allowed_methods == {'POST', 'GET'}
async def test_raise_method_not_found(router) -> None:
    """An unknown path yields a 404 system route."""
    router.add_route('GET', '/a', make_handler())
    request = make_mocked_request('GET', '/b')
    match = await router.resolve(request)
    assert isinstance(match.route, SystemRoute)
    assert match == {}
    with pytest.raises(HTTPNotFound) as ctx:
        await match.handler(request)
    assert ctx.value.status == 404
def test_double_add_url_with_the_same_name(router) -> None:
    """Registering two routes under one name raises ValueError."""
    router.add_route('GET', '/get', make_handler(), name='name')
    with pytest.raises(ValueError) as ctx:
        router.add_route('GET', '/get_other', make_handler(), name='name')
    assert re.match("Duplicate 'name', already handled by", str(ctx.value))
def test_route_plain(router) -> None:
    """A named plain route is reachable through router[name]."""
    registered = router.add_route('GET', '/get', make_handler(), name='name')
    found = next(iter(router['name']))
    assert str(found.url_for()) == '/get'
    assert found is registered
def test_route_unknown_route_name(router) -> None:
    """Looking up an unregistered route name raises KeyError."""
    with pytest.raises(KeyError):
        _ = router['unknown']
def test_route_dynamic(router) -> None:
    """url_for() on a dynamic route substitutes the variable part."""
    registered = router.add_route('GET', '/get/{name}', make_handler(),
                                  name='name')
    found = next(iter(router['name']))
    assert str(found.url_for(name='John')) == '/get/John'
    assert found is registered
def test_add_static(router) -> None:
    """add_static registers a named resource that builds file URLs."""
    resource = router.add_static('/st',
                                 os.path.dirname(aiohttp.__file__),
                                 name='static')
    assert router['static'] is resource
    assert str(resource.url_for(filename='/dir/a.txt')) == '/st/dir/a.txt'
    assert len(resource) == 2
def test_add_static_append_version(router) -> None:
    """append_version=True appends a content-hash query parameter."""
    resource = router.add_static('/st',
                                 os.path.dirname(__file__),
                                 name='static')
    url = resource.url_for(filename='/data.unknown_mime_type',
                           append_version=True)
    assert str(url) == ('/st/data.unknown_mime_type?'
                        'v=aUsn8CHEhhszc81d28QmlcBW0KQpfS2F4trgQKhOYd8%3D')
def test_add_static_append_version_set_from_constructor(router) -> None:
    """append_version=True on the constructor applies to url_for()."""
    resource = router.add_static('/st',
                                 os.path.dirname(__file__),
                                 append_version=True,
                                 name='static')
    url = resource.url_for(filename='/data.unknown_mime_type')
    assert str(url) == ('/st/data.unknown_mime_type?'
                        'v=aUsn8CHEhhszc81d28QmlcBW0KQpfS2F4trgQKhOYd8%3D')
def test_add_static_append_version_override_constructor(router) -> None:
    """url_for(append_version=False) overrides the constructor default."""
    resource = router.add_static('/st',
                                 os.path.dirname(__file__),
                                 append_version=True,
                                 name='static')
    url = resource.url_for(filename='/data.unknown_mime_type',
                           append_version=False)
    assert str(url) == '/st/data.unknown_mime_type'
def test_add_static_append_version_filename_without_slash(router) -> None:
    """A filename without a leading slash still gets a version hash."""
    resource = router.add_static('/st',
                                 os.path.dirname(__file__),
                                 name='static')
    url = resource.url_for(filename='data.unknown_mime_type',
                           append_version=True)
    assert str(url) == ('/st/data.unknown_mime_type?'
                        'v=aUsn8CHEhhszc81d28QmlcBW0KQpfS2F4trgQKhOYd8%3D')
def test_add_static_append_version_non_exists_file(router) -> None:
    """A missing file gets no version query parameter."""
    resource = router.add_static('/st',
                                 os.path.dirname(__file__),
                                 name='static')
    url = resource.url_for(filename='/non_exists_file', append_version=True)
    assert str(url) == '/st/non_exists_file'
def test_add_static_append_version_non_exists_file_without_slash(
        router) -> None:
    """A missing file without a leading slash gets no version parameter."""
    resource = router.add_static('/st',
                                 os.path.dirname(__file__),
                                 name='static')
    url = resource.url_for(filename='non_exists_file', append_version=True)
    assert str(url) == '/st/non_exists_file'
def test_add_static_append_version_follow_symlink(router, tmpdir) -> None:
    """Symlinked files are hashed when follow_symlinks is enabled."""
    symlink_path = os.path.join(str(tmpdir), 'append_version_symlink')
    os.symlink(os.path.dirname(__file__), symlink_path, True)
    # Register global static route:
    resource = router.add_static('/st', str(tmpdir), follow_symlinks=True,
                                 append_version=True)
    url = resource.url_for(
        filename='/append_version_symlink/data.unknown_mime_type')
    assert str(url) == ('/st/append_version_symlink/data.unknown_mime_type?'
                        'v=aUsn8CHEhhszc81d28QmlcBW0KQpfS2F4trgQKhOYd8%3D')
def test_add_static_append_version_not_follow_symlink(router, tmpdir) -> None:
    """Symlinked files are not resolved when follow_symlinks is off."""
    symlink_path = os.path.join(str(tmpdir), 'append_version_symlink')
    os.symlink(os.path.dirname(__file__), symlink_path, True)
    # Register global static route:
    resource = router.add_static('/st', str(tmpdir), follow_symlinks=False,
                                 append_version=True)
    url = resource.url_for(
        filename='/append_version_symlink/data.unknown_mime_type')
    assert str(url) == '/st/append_version_symlink/data.unknown_mime_type'
def test_plain_not_match(router) -> None:
    """A plain resource does not match an unrelated path."""
    router.add_route('GET', '/get/path', make_handler(), name='name')
    assert router['name']._match('/another/path') is None
def test_dynamic_not_match(router) -> None:
    """A dynamic resource does not match an unrelated path."""
    router.add_route('GET', '/get/{name}', make_handler(), name='name')
    assert router['name']._match('/another/path') is None
async def test_static_not_match(router) -> None:
    """A static resource yields no match for a foreign path."""
    router.add_static('/pre', os.path.dirname(aiohttp.__file__),
                      name='name')
    result = await router['name'].resolve(
        make_mocked_request('GET', '/another/path'))
    assert result == (None, set())
def test_dynamic_with_trailing_slash(router) -> None:
    """A dynamic pattern with a trailing slash matches it literally."""
    router.add_route('GET', '/get/{name}/', make_handler(), name='name')
    assert router['name']._match('/get/John/') == {'name': 'John'}
def test_len(router) -> None:
    """len(router) counts named resources."""
    for route_name, path in (('name1', '/get1'), ('name2', '/get2')):
        router.add_route('GET', path, make_handler(), name=route_name)
    assert len(router) == 2
def test_iter(router) -> None:
    """Iterating a router yields its resource names."""
    for route_name, path in (('name1', '/get1'), ('name2', '/get2')):
        router.add_route('GET', path, make_handler(), name=route_name)
    assert set(iter(router)) == {'name1', 'name2'}
def test_contains(router) -> None:
    """Membership tests work against registered resource names."""
    for route_name, path in (('name1', '/get1'), ('name2', '/get2')):
        router.add_route('GET', path, make_handler(), name=route_name)
    assert 'name1' in router
    assert 'name3' not in router
def test_static_repr(router) -> None:
    """repr() of a static resource includes its name and prefix."""
    router.add_static('/get', os.path.dirname(aiohttp.__file__),
                      name='name')
    representation = repr(router['name'])
    assert re.match(r"<StaticResource 'name' /get", representation)
def test_static_adds_slash(router) -> None:
    """A static prefix without a trailing slash is stored as-is."""
    resource = router.add_static('/prefix',
                                 os.path.dirname(aiohttp.__file__))
    assert resource._prefix == '/prefix'
def test_static_remove_trailing_slash(router) -> None:
    """A trailing slash on a static prefix is stripped."""
    resource = router.add_static('/prefix/',
                                 os.path.dirname(aiohttp.__file__))
    assert resource._prefix == '/prefix'
async def test_add_route_with_re(router) -> None:
handler = make_handler()
router.add_route('GET', r'/handler/{to:\d+}', handler)
req = make_mocked_request('GET', '/handler/1234')
info = await router.resolve(req)
assert info is not None
assert {'to': '1234'} == info
router.add_route('GET', r'/handler/{name}.html', handler)
req = make_mocked_request('GET', '/handler/test.html')
info = await router.resolve(req)
assert {'name': 'test'} == info
async def test_add_route_with_re_and_slashes(router) -> None:
handler = make_handler()
router.add_route('GET', r'/handler/{to:[^/]+/?}', handler)
req = make_mocked_request('GET', '/handler/1234/')
info = await router.resolve(req)
assert info is not None
assert {'to': '1234/'} == info
router.add_route('GET', r'/handler/{to:.+}', handler)
req = make_mocked_request('GET', '/handler/1234/5/6/7')
info = await router.resolve(req)
assert info is not None
assert {'to': '1234/5/6/7'} == info
async def test_add_route_with_re_not_match(router) -> None:
handler = make_handler()
router.add_route('GET', r'/handler/{to:\d+}', handler)
req = make_mocked_request('GET', '/handler/tail')
match_info = await router.resolve(req)
assert isinstance(match_info.route, SystemRoute)
assert {} == match_info
with pytest.raises(HTTPNotFound):
await match_info.handler(req)
async def test_add_route_with_re_including_slashes(router) -> None:
handler = make_handler()
router.add_route('GET', r'/handler/{to:.+}/tail', handler)
req = make_mocked_request('GET', '/handler/re/with/slashes/tail')
info = await router.resolve(req)
assert info is not None
assert {'to': 're/with/slashes'} == info
def test_add_route_with_invalid_re(router) -> None:
handler = make_handler()
with pytest.raises(ValueError) as ctx:
router.add_route('GET', r'/handler/{to:+++}', handler)
s = str(ctx.value)
assert s.startswith("Bad pattern '" +
PATH_SEP +
"handler" +
PATH_SEP +
"(?P<to>+++)': nothing to repeat")
assert ctx.value.__cause__ is None
def test_route_dynamic_with_regex_spec(router) -> None:
handler = make_handler()
route = router.add_route('GET', r'/get/{num:^\d+}', handler,
name='name')
url = route.url_for(num='123')
assert '/get/123' == str(url)
def test_route_dynamic_with_regex_spec_and_trailing_slash(router) -> None:
handler = make_handler()
route = router.add_route('GET', r'/get/{num:^\d+}/', handler,
name='name')
url = route.url_for(num='123')
assert '/get/123/' == str(url)
def test_route_dynamic_with_regex(router) -> None:
handler = make_handler()
route = router.add_route('GET', r'/{one}/{two:.+}', handler)
url = route.url_for(one='1', two='2')
assert '/1/2' == str(url)
def test_route_dynamic_quoting(router) -> None:
handler = make_handler()
route = router.add_route('GET', r'/{arg}', handler)
url = route.url_for(arg='1 2/текст')
assert '/1%202/%D1%82%D0%B5%D0%BA%D1%81%D1%82' == str(url)
async def test_regular_match_info(router) -> None:
handler = make_handler()
router.add_route('GET', '/get/{name}', handler)
req = make_mocked_request('GET', '/get/john')
match_info = await router.resolve(req)
assert {'name': 'john'} == match_info
assert re.match("<MatchInfo {'name': 'john'}: .+<Dynamic.+>>",
repr(match_info))
async def test_match_info_with_plus(router) -> None:
handler = make_handler()
router.add_route('GET', '/get/{version}', handler)
req = make_mocked_request('GET', '/get/1.0+test')
match_info = await router.resolve(req)
assert {'version': '1.0+test'} == match_info
async def test_not_found_repr(router) -> None:
req = make_mocked_request('POST', '/path/to')
match_info = await router.resolve(req)
assert "<MatchInfoError 404: Not Found>" == repr(match_info)
async def test_not_allowed_repr(router) -> None:
handler = make_handler()
router.add_route('GET', '/path/to', handler)
handler2 = make_handler()
router.add_route('POST', '/path/to', handler2)
req = make_mocked_request('PUT', '/path/to')
match_info = await router.resolve(req)
assert "<MatchInfoError 405: Method Not Allowed>" == repr(match_info)
def test_default_expect_handler(router) -> None:
route = router.add_route('GET', '/', make_handler())
assert route._expect_handler is _default_expect_handler
def test_custom_expect_handler_plain(router) -> None:
async def handler(request):
pass
route = router.add_route(
'GET', '/', make_handler(), expect_handler=handler)
assert route._expect_handler is handler
assert isinstance(route, ResourceRoute)
def test_custom_expect_handler_dynamic(router) -> None:
async def handler(request):
pass
route = router.add_route(
'GET', '/get/{name}', make_handler(), expect_handler=handler)
assert route._expect_handler is handler
assert isinstance(route, ResourceRoute)
def test_expect_handler_non_coroutine(router) -> None:
def handler(request):
pass
with pytest.raises(AssertionError):
router.add_route('GET', '/', make_handler(),
expect_handler=handler)
async def test_dynamic_match_non_ascii(router) -> None:
handler = make_handler()
router.add_route('GET', '/{var}', handler)
req = make_mocked_request(
'GET',
'/%D1%80%D1%83%D1%81%20%D1%82%D0%B5%D0%BA%D1%81%D1%82')
match_info = await router.resolve(req)
assert {'var': 'рус текст'} == match_info
async def test_dynamic_match_with_static_part(router) -> None:
handler = make_handler()
router.add_route('GET', '/{name}.html', handler)
req = make_mocked_request('GET', '/file.html')
match_info = await router.resolve(req)
assert {'name': 'file'} == match_info
async def test_dynamic_match_two_part2(router) -> None:
handler = make_handler()
router.add_route('GET', '/{name}.{ext}', handler)
req = make_mocked_request('GET', '/file.html')
match_info = await router.resolve(req)
assert {'name': 'file', 'ext': 'html'} == match_info
async def test_dynamic_match_unquoted_path(router) -> None:
handler = make_handler()
router.add_route('GET', '/{path}/{subpath}', handler)
resource_id = 'my%2Fpath%7Cwith%21some%25strange%24characters'
req = make_mocked_request('GET', '/path/{0}'.format(resource_id))
match_info = await router.resolve(req)
assert match_info == {
'path': 'path',
'subpath': unquote(resource_id)
}
def test_add_route_not_started_with_slash(router) -> None:
with pytest.raises(ValueError):
handler = make_handler()
router.add_route('GET', 'invalid_path', handler)
def test_add_route_invalid_method(router) -> None:
sample_bad_methods = {
'BAD METHOD',
'B@D_METHOD',
'[BAD_METHOD]',
'{BAD_METHOD}',
'(BAD_METHOD)',
'B?D_METHOD',
}
for bad_method in sample_bad_methods:
with pytest.raises(ValueError):
handler = make_handler()
router.add_route(bad_method, '/path', handler)
def test_routes_view_len(router, fill_routes) -> None:
fill_routes()
assert 4 == len(router.routes())
def test_routes_view_iter(router, fill_routes) -> None:
routes = fill_routes()
assert list(routes) == list(router.routes())
def test_routes_view_contains(router, fill_routes) -> None:
routes = fill_routes()
for route in routes:
assert route in router.routes()
def test_routes_abc(router) -> None:
assert isinstance(router.routes(), Sized)
assert isinstance(router.routes(), Iterable)
assert isinstance(router.routes(), Container)
def test_named_resources_abc(router) -> None:
assert isinstance(router.named_resources(), Mapping)
assert not isinstance(router.named_resources(), MutableMapping)
def test_named_resources(router) -> None:
route1 = router.add_route('GET', '/plain', make_handler(),
name='route1')
route2 = router.add_route('GET', '/variable/{name}',
make_handler(), name='route2')
route3 = router.add_static('/static',
os.path.dirname(aiohttp.__file__),
name='route3')
names = {route1.name, route2.name, route3.name}
assert 3 == len(router.named_resources())
for name in names:
assert name in router.named_resources()
assert isinstance(router.named_resources()[name],
AbstractResource)
def test_resource_iter(router) -> None:
async def handler(request):
pass
resource = router.add_resource('/path')
r1 = resource.add_route('GET', handler)
r2 = resource.add_route('POST', handler)
assert 2 == len(resource)
assert [r1, r2] == list(resource)
def test_deprecate_bare_generators(router) -> None:
resource = router.add_resource('/path')
def gen(request):
yield
with pytest.warns(DeprecationWarning):
resource.add_route('GET', gen)
def test_view_route(router) -> None:
resource = router.add_resource('/path')
route = resource.add_route('GET', View)
assert View is route.handler
def test_resource_route_match(router) -> None:
async def handler(request):
pass
resource = router.add_resource('/path')
route = resource.add_route('GET', handler)
assert {} == route.resource._match('/path')
def test_error_on_double_route_adding(router) -> None:
async def handler(request):
pass
resource = router.add_resource('/path')
resource.add_route('GET', handler)
with pytest.raises(RuntimeError):
resource.add_route('GET', handler)
def test_error_on_adding_route_after_wildcard(router) -> None:
async def handler(request):
pass
resource = router.add_resource('/path')
resource.add_route('*', handler)
with pytest.raises(RuntimeError):
resource.add_route('GET', handler)
async def test_http_exception_is_none_when_resolved(router) -> None:
handler = make_handler()
router.add_route('GET', '/', handler)
req = make_mocked_request('GET', '/')
info = await router.resolve(req)
assert info.http_exception is None
async def test_http_exception_is_not_none_when_not_resolved(router) -> None:
handler = make_handler()
router.add_route('GET', '/', handler)
req = make_mocked_request('GET', '/abc')
info = await router.resolve(req)
assert info.http_exception.status == 404
async def test_match_info_get_info_plain(router) -> None:
handler = make_handler()
router.add_route('GET', '/', handler)
req = make_mocked_request('GET', '/')
info = await router.resolve(req)
assert info.get_info() == {'path': '/'}
async def test_match_info_get_info_dynamic(router) -> None:
handler = make_handler()
router.add_route('GET', '/{a}', handler)
req = make_mocked_request('GET', '/value')
info = await router.resolve(req)
assert info.get_info() == {
'pattern': re.compile(PATH_SEP+'(?P<a>[^{}/]+)'),
'formatter': '/{a}'}
async def test_match_info_get_info_dynamic2(router) -> None:
handler = make_handler()
router.add_route('GET', '/{a}/{b}', handler)
req = make_mocked_request('GET', '/path/to')
info = await router.resolve(req)
assert info.get_info() == {
'pattern': re.compile(PATH_SEP +
'(?P<a>[^{}/]+)' +
PATH_SEP +
'(?P<b>[^{}/]+)'),
'formatter': '/{a}/{b}'}
def test_static_resource_get_info(router) -> None:
directory = pathlib.Path(aiohttp.__file__).parent
resource = router.add_static('/st', directory)
assert resource.get_info() == {'directory': directory,
'prefix': '/st'}
async def test_system_route_get_info(router) -> None:
handler = make_handler()
router.add_route('GET', '/', handler)
req = make_mocked_request('GET', '/abc')
info = await router.resolve(req)
assert info.get_info()['http_exception'].status == 404
def test_resources_view_len(router) -> None:
router.add_resource('/plain')
router.add_resource('/variable/{name}')
assert 2 == len(router.resources())
def test_resources_view_iter(router) -> None:
resource1 = router.add_resource('/plain')
resource2 = router.add_resource('/variable/{name}')
resources = [resource1, resource2]
assert list(resources) == list(router.resources())
def test_resources_view_contains(router) -> None:
resource1 = router.add_resource('/plain')
resource2 = router.add_resource('/variable/{name}')
resources = [resource1, resource2]
for resource in resources:
assert resource in router.resources()
def test_resources_abc(router) -> None:
assert isinstance(router.resources(), Sized)
assert isinstance(router.resources(), Iterable)
assert isinstance(router.resources(), Container)
def test_static_route_user_home(router) -> None:
here = pathlib.Path(aiohttp.__file__).parent
home = pathlib.Path(os.path.expanduser('~'))
if not str(here).startswith(str(home)): # pragma: no cover
pytest.skip("aiohttp folder is not placed in user's HOME")
static_dir = '~/' + str(here.relative_to(home))
route = router.add_static('/st', static_dir)
assert here == route.get_info()['directory']
def test_static_route_points_to_file(router) -> None:
here = pathlib.Path(aiohttp.__file__).parent / '__init__.py'
with pytest.raises(ValueError):
router.add_static('/st', here)
async def test_404_for_static_resource(router) -> None:
resource = router.add_static('/st',
os.path.dirname(aiohttp.__file__))
ret = await resource.resolve(
make_mocked_request('GET', '/unknown/path'))
assert (None, set()) == ret
async def test_405_for_resource_adapter(router) -> None:
resource = router.add_static('/st',
os.path.dirname(aiohttp.__file__))
ret = await resource.resolve(
make_mocked_request('POST', '/st/abc.py'))
assert (None, {'HEAD', 'GET'}) == ret
async def test_check_allowed_method_for_found_resource(router) -> None:
handler = make_handler()
resource = router.add_resource('/')
resource.add_route('GET', handler)
ret = await resource.resolve(make_mocked_request('GET', '/'))
assert ret[0] is not None
assert {'GET'} == ret[1]
def test_url_for_in_static_resource(router) -> None:
resource = router.add_static('/static',
os.path.dirname(aiohttp.__file__))
assert URL('/static/file.txt') == resource.url_for(filename='file.txt')
def test_url_for_in_static_resource_pathlib(router) -> None:
resource = router.add_static('/static',
os.path.dirname(aiohttp.__file__))
assert URL('/static/file.txt') == resource.url_for(
filename=pathlib.Path('file.txt'))
def test_url_for_in_resource_route(router) -> None:
route = router.add_route('GET', '/get/{name}', make_handler(),
name='name')
assert URL('/get/John') == route.url_for(name='John')
def test_subapp_get_info(app) -> None:
    """A prefixed sub-app resource reports its prefix and app in get_info().

    Bug fix: the sub-application must be registered on the parent ``app``
    fixture; the original called ``subapp.add_subapp('/pre', subapp)``,
    nesting the sub-app inside itself and leaving the fixture unused.
    """
    subapp = web.Application()
    resource = app.add_subapp('/pre', subapp)
    assert resource.get_info() == {'prefix': '/pre', 'app': subapp}
@pytest.mark.parametrize('domain,error', [
(None, TypeError),
('', ValueError),
('http://dom', ValueError),
('*.example.com', ValueError),
('example$com', ValueError),
])
def test_domain_validation_error(domain, error):
with pytest.raises(error):
Domain(domain)
def test_domain_valid():
assert Domain('example.com:81').canonical == 'example.com:81'
assert MaskDomain('*.example.com').canonical == r'.*\.example\.com'
assert Domain('пуни.код').canonical == 'xn--h1ajfq.xn--d1alm'
@pytest.mark.parametrize('a,b,result', [
    ('example.com', 'example.com', True),
    ('example.com:81', 'example.com:81', True),
    ('example.com:81', 'example.com', False),
    ('пуникод', 'xn--d1ahgkhc2a', True),
    ('*.example.com', 'jpg.example.com', True),
    ('*.example.com', 'a.example.com', True),
    ('*.example.com', 'example.com', False),
])
def test_match_domain(a, b, result):
    """Domain/MaskDomain rules match (or reject) hosts as parametrized."""
    # Wildcard patterns need the mask-matching rule class.
    rule = MaskDomain(a) if '*' in a else Domain(a)
    assert rule.match_domain(b) is result
def test_add_subapp_errors(app):
with pytest.raises(TypeError):
app.add_subapp(1, web.Application())
def test_subapp_rule_resource(app):
subapp = web.Application()
subapp.router.add_get('/', make_handler())
rule = Domain('example.com')
assert rule.get_info() == {'domain': 'example.com'}
resource = app.add_domain('example.com', subapp)
assert resource.canonical == 'example.com'
assert resource.get_info() == {'rule': resource._rule, 'app': subapp}
resource.add_prefix('/a')
resource.raw_match('/b')
assert len(resource)
assert list(resource)
assert repr(resource).startswith('<MatchedSubAppResource')
with pytest.raises(RuntimeError):
resource.url_for()
async def test_add_domain_not_str(app, loop):
    """add_domain() rejects a non-string domain rule with TypeError.

    Bug fix: the original immediately rebound ``app`` to a fresh
    ``web.Application()``, silently shadowing (and wasting) the ``app``
    fixture; the fixture instance is used directly instead.
    """
    with pytest.raises(TypeError):
        app.add_domain(1, app)
async def test_add_domain(app, loop):
subapp1 = web.Application()
h1 = make_handler()
subapp1.router.add_get('/', h1)
app.add_domain('example.com', subapp1)
subapp2 = web.Application()
h2 = make_handler()
subapp2.router.add_get('/', h2)
app.add_domain('*.example.com', subapp2)
subapp3 = web.Application()
h3 = make_handler()
subapp3.router.add_get('/', h3)
app.add_domain('*', subapp3)
request = make_mocked_request('GET', '/', {'host': 'example.com'})
match_info = await app.router.resolve(request)
assert match_info.route.handler is h1
request = make_mocked_request('GET', '/', {'host': 'a.example.com'})
match_info = await app.router.resolve(request)
assert match_info.route.handler is h2
request = make_mocked_request('GET', '/', {'host': 'example2.com'})
match_info = await app.router.resolve(request)
assert match_info.route.handler is h3
request = make_mocked_request('POST', '/', {'host': 'example.com'})
match_info = await app.router.resolve(request)
assert isinstance(match_info.http_exception, HTTPMethodNotAllowed)
def test_subapp_url_for(app) -> None:
subapp = web.Application()
resource = app.add_subapp('/pre', subapp)
with pytest.raises(RuntimeError):
resource.url_for()
def test_subapp_repr(app) -> None:
subapp = web.Application()
resource = app.add_subapp('/pre', subapp)
assert repr(resource).startswith(
'<PrefixedSubAppResource /pre -> <Application')
def test_subapp_len(app) -> None:
subapp = web.Application()
subapp.router.add_get('/', make_handler(), allow_head=False)
subapp.router.add_post('/', make_handler())
resource = app.add_subapp('/pre', subapp)
assert len(resource) == 2
def test_subapp_iter(app) -> None:
subapp = web.Application()
r1 = subapp.router.add_get('/', make_handler(), allow_head=False)
r2 = subapp.router.add_post('/', make_handler())
resource = app.add_subapp('/pre', subapp)
assert list(resource) == [r1, r2]
def test_invalid_route_name(router) -> None:
with pytest.raises(ValueError):
router.add_get('/', make_handler(), name='invalid name')
def test_frozen_router(router) -> None:
router.freeze()
with pytest.raises(RuntimeError):
router.add_get('/', make_handler())
def test_frozen_router_subapp(app) -> None:
subapp = web.Application()
subapp.freeze()
with pytest.raises(RuntimeError):
app.add_subapp('/pre', subapp)
def test_frozen_app_on_subapp(app) -> None:
app.freeze()
subapp = web.Application()
with pytest.raises(RuntimeError):
app.add_subapp('/pre', subapp)
def test_set_options_route(router) -> None:
    """set_options_route() adds an OPTIONS route once; a second call fails."""

    def _options_route(res):
        # First OPTIONS route of the resource, or None when absent.
        return next((r for r in res if r.method == 'OPTIONS'), None)

    resource = router.add_static('/static',
                                 os.path.dirname(aiohttp.__file__))
    assert _options_route(resource) is None
    resource.set_options_route(make_handler())
    assert _options_route(resource) is not None
    with pytest.raises(RuntimeError):
        resource.set_options_route(make_handler())
def test_dynamic_url_with_name_started_from_underscore(router) -> None:
route = router.add_route('GET', '/get/{_name}', make_handler())
assert URL('/get/John') == route.url_for(_name='John')
def test_cannot_add_subapp_with_empty_prefix(app) -> None:
subapp = web.Application()
with pytest.raises(ValueError):
app.add_subapp('', subapp)
def test_cannot_add_subapp_with_slash_prefix(app) -> None:
subapp = web.Application()
with pytest.raises(ValueError):
app.add_subapp('/', subapp)
async def test_convert_empty_path_to_slash_on_freezing(router) -> None:
handler = make_handler()
route = router.add_get('', handler)
resource = route.resource
assert resource.get_info() == {'path': ''}
router.freeze()
assert resource.get_info() == {'path': '/'}
def test_deprecate_non_coroutine(router) -> None:
def handler(request):
pass
with pytest.warns(DeprecationWarning):
router.add_route('GET', '/handler', handler)
def test_plain_resource_canonical() -> None:
    """A PlainResource's canonical form is its literal path."""
    path = '/plain/path'
    assert PlainResource(path=path).canonical == path
def test_dynamic_resource_canonical() -> None:
    """Dynamic patterns canonicalize by dropping regex specs from variables."""
    cases = {
        '/get/{name}': '/get/{name}',
        r'/get/{num:^\d+}': '/get/{num}',
        r'/handler/{to:\d+}': r'/handler/{to}',
        r'/{one}/{two:.+}': r'/{one}/{two}',
    }
    for pattern, expected in cases.items():
        assert DynamicResource(path=pattern).canonical == expected
def test_static_resource_canonical() -> None:
    """A StaticResource's canonical form is its URL prefix."""
    prefix = '/prefix'
    directory = str(os.path.dirname(aiohttp.__file__))
    resource = StaticResource(prefix=prefix, directory=directory)
    assert resource.canonical == prefix
def test_prefixed_subapp_resource_canonical(app) -> None:
    """A prefixed sub-app resource's canonical form is its URL prefix.

    Bug fix: the sub-application must be registered on the parent ``app``
    fixture; the original added the sub-app to itself
    (``subapp.add_subapp(canonical, subapp)``), leaving the fixture unused.
    """
    canonical = '/prefix'
    subapp = web.Application()
    res = app.add_subapp(canonical, subapp)
    assert res.canonical == canonical
| {
"content_hash": "434c5990816012c60a7e2dc9fe8cd337",
"timestamp": "",
"source": "github",
"line_count": 1248,
"max_line_length": 79,
"avg_line_length": 31.692307692307693,
"alnum_prop": 0.6211569579288025,
"repo_name": "arthurdarcet/aiohttp",
"id": "632fd275d3c4d6fc820bd6865251703d216da70f",
"size": "39579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_urldispatch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "C",
"bytes": "190102"
},
{
"name": "Gherkin",
"bytes": "201"
},
{
"name": "Makefile",
"bytes": "2935"
},
{
"name": "Python",
"bytes": "1627527"
},
{
"name": "Shell",
"bytes": "3023"
}
],
"symlink_target": ""
} |
"""Example use of NrvrCommander.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Contributor - Nora Baschy
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2015.
Simplified BSD License"""
import os.path
import shutil
import sys
import tempfile
import time
from nrvr.diskimage.isoimage import IsoImage
from nrvr.distros.common.ssh import LinuxSshCommand
from nrvr.distros.common.util import LinuxUtil
from nrvr.distros.el.gnome import ElGnome
from nrvr.distros.el.kickstart import ElIsoImage, ElKickstartFileContent
from nrvr.distros.el.kickstarttemplates import ElKickstartTemplates
from nrvr.machine.ports import PortsFile
from nrvr.process.commandcapture import CommandCapture
from nrvr.remote.ssh import SshCommand, ScpCommand
from nrvr.util.download import Download
from nrvr.util.ipaddress import IPAddress
from nrvr.util.nameserver import Nameserver
from nrvr.util.requirements import SystemRequirements
from nrvr.util.times import Timestamp
from nrvr.util.user import ScriptUser
from nrvr.vm.vmware import VmdkFile, VmxFile, VMwareHypervisor, VMwareMachine
from nrvr.vm.vmwaretemplates import VMwareTemplates
# this is a good way to preflight check
SystemRequirements.commandsRequiredByImplementations([IsoImage,
VmdkFile, VMwareHypervisor,
SshCommand, ScpCommand],
verbose=True)
# this is a good way to preflight check
VMwareHypervisor.localRequired()
# BEGIN essential example code
ipaddress = "192.168.0.161"
# a possible modification pointed out
# makes sense e.g. if used together with whateverVm.vmxFile.setEthernetAdapter(adapter, "hostonly")
#ipaddress = IPAddress.numberWithinSubnet(VMwareHypervisor.localHostOnlyIPAddress, 161)
rootpw = "redwood"
additionalUsers = []
# some possible choices pointed out
#additionalUsers [("jack","rainbow"),("jill","sunshine")]
# one possible way of making new VM names and directories
name = IPAddress.nameWithNumber("example", ipaddress, separator=None)
exampleVm = VMwareMachine(ScriptUser.loggedIn.userHomeRelative("vmware/examples/%s/%s.vmx" % (name, name)))
# make the virtual machine
# create the VM directory only on first run; the original compared
# "exists == False", which is unidiomatic (PEP 8 E712) — use "not" and
# fold away the throwaway variable
if not exampleVm.vmxFile.exists():
    exampleVm.mkdir()
#
# comment solely regarding .iso files larger than 4GB, e.g. x86_64 Install-DVD,
# there had been issues that almost have gone away with a fixed newer version iso-read,
# there is a fix in libcdio (which provides iso-read) 0.92,
# the remaining issue is you need to make sure you have libcdio 0.92 installed
#
#downloadedDistroIsoImage = ElIsoImage(ScriptUser.loggedIn.userHomeRelative \
# ("Downloads/CentOS-6.6-i386-bin-DVD1.iso"))
downloadedDistroIsoImage = ElIsoImage(Download.fromUrl
("http://mirrors.usc.edu/pub/linux/distributions/centos/6.6/isos/i386/CentOS-6.6-i386-bin-DVD1.iso"))
# some possible choices pointed out
# server w command line only
kickstartFileContent = ElKickstartFileContent(ElKickstartTemplates.usableElKickstartTemplate001)
kickstartFileContent.replaceRootpw(rootpw)
kickstartFileContent.elReplaceHostname(exampleVm.basenameStem)
kickstartFileContent.elReplaceStaticIP(ipaddress, nameservers=Nameserver.list)
# put in DHCP at eth0, to be used with NAT, works well if before hostonly
#kickstartFileContent.elReplaceStaticIP(ipaddress, nameservers=[])
#kickstartFileContent.elAddNetworkConfigurationWithDhcp("eth0")
# some possible modifications pointed out
#kickstartFileContent.replaceAllPackages(ElKickstartTemplates.packagesOfSL64Minimal)
#kickstartFileContent.removePackage("@office-suite")
#kickstartFileContent.addPackage("httpd")
# some other possible modifications pointed out
#kickstartFileContent.replaceAllPackages(ElKickstartTemplates.packagesOfSL64MinimalDesktop)
#kickstartFileContent.elActivateGraphicalLogin()
for additionalUser in additionalUsers:
kickstartFileContent.elAddUser(additionalUser[0], pwd=additionalUser[1])
# some possible modifications pointed out
#kickstartFileContent.setSwappiness(10)
# pick right temporary directory, ideally same as VM
modifiedDistroIsoImage = downloadedDistroIsoImage.cloneWithAutoBootingKickstart \
(kickstartFileContent,
cloneIsoImagePath=os.path.join(exampleVm.directory, "made-to-order-os-install.iso"))
# some necessary choices pointed out
# 32-bit versus 64-bit linux, memsizeMegabytes needs to be more for 64-bit, guestOS is "centos" versus "centos-64"
exampleVm.create(memsizeMegabytes=1200, guestOS="centos", ideDrives=[40000, 300, modifiedDistroIsoImage])
# some possible modifications pointed out
#exampleVm.vmxFile.setMemorySize(1280)
#exampleVm.vmxFile.setNumberOfProcessorCores(2)
#exampleVm.vmxFile.setAccelerate3D()
exampleVm.portsFile.setSsh(ipaddress=ipaddress, user="root", pwd=rootpw)
exampleVm.portsFile.setShutdown()
for additionalUser in additionalUsers:
exampleVm.portsFile.setSsh(ipaddress=ipaddress, user=additionalUser[0], pwd=additionalUser[1])
if additionalUsers:
exampleVm.portsFile.setRegularUser(additionalUsers[0][0])
# some possible modifications pointed out
#exampleVm.vmxFile.setEthernetAdapter(0, "bridged")
# NAT works well if before hostonly
#exampleVm.vmxFile.setEthernetAdapter(0, "nat")
#exampleVm.vmxFile.setEthernetAdapter(1, "hostonly")
# start up for operating system install
VMwareHypervisor.local.start(exampleVm.vmxFilePath, gui=True, extraSleepSeconds=0)
VMwareHypervisor.local.sleepUntilNotRunning(exampleVm.vmxFilePath, ticker=True)
exampleVm.vmxFile.removeAllIdeCdromImages()
modifiedDistroIsoImage.remove()
# start up for accepting known host key
VMwareHypervisor.local.start(exampleVm.vmxFilePath, gui=True, extraSleepSeconds=0)
exampleVm.sleepUntilHasAcceptedKnownHostKey(ticker=True)
# a possible choice pointed out
#if exampleVm.regularUser:
# exampleVm.sshCommand([LinuxUtil.commandToEnableSudo(exampleVm.regularUser)])
# some possible choices pointed out
#if exampleVm.regularUser:
# exampleVm.sshCommand([ElGnome.elCommandToEnableAutoLogin(exampleVm.regularUser)])
# exampleVm.sshCommand([ElGnome.elCommandToDisableScreenSaver()], user=exampleVm.regularUser)
# exampleVm.sshCommand([ElGnome.elCommandToSetSolidColorBackground()], user=exampleVm.regularUser)
# exampleVm.sshCommand([ElGnome.elCommandToDisableUpdateNotifications()], user=exampleVm.regularUser)
# a possible modification pointed out
# append an ipaddress hostname line to /etc/hosts for a smooth automated install of something
# only if no line yet
#print exampleVm.sshCommand(["fgrep -q -e '" + name + "' /etc/hosts || " +
# "echo " + "'" + ipaddress + " " + name + "'" + " >> /etc/hosts"]).output
# a possible modification pointed out
# open firewall port 80 for httpd
# only if no line yet
#print exampleVm.sshCommand(["fgrep -q -e '--dport 80 ' /etc/sysconfig/iptables || " +
# "sed -i -e '/--dport 22 / p' -e 's/--dport 22 /--dport 80 /' /etc/sysconfig/iptables"]).output
# restart firewall
#print exampleVm.sshCommand(["service iptables restart"]).output
# a possible modification pointed out
# copy over some custom installer
#customInstaller = "custom-1.2.3-installer-linux.bin"
#downloadedCustomInstaller = ScriptUser.loggedIn.userHomeRelative(os.path.join("Downloads", customInstaller))
#guestDownloadsDirectory = "/root/Downloads"
#exampleVm.sshCommand(["mkdir -p " + guestDownloadsDirectory])
#guestDownloadedCustomInstaller = os.path.join(guestDownloadsDirectory, customInstaller)
#exampleVm.scpPutCommand(downloadedCustomInstaller, guestDownloadedCustomInstaller)
# a possible modification pointed out
#customInstallPwd = "oakwood"
# install custom software
#print exampleVm.sshCommand([guestDownloadedCustomInstaller +
# " --mode unattended" +
# " --install_password '" + customInstallPwd + "'"]).output
# these ssh commands here are just a demo
print "------"
print exampleVm.sshCommand(["ls", "-al"]).output
print "------"
print exampleVm.sshCommand(["ls nonexistent ; echo `hostname`"]).output
print "------"
# these scp commands here are just a demo
exampleDir = os.path.join(tempfile.gettempdir(), Timestamp.microsecondTimestamp())
os.mkdir(exampleDir, 0755)
try:
sendDir = os.path.join(exampleDir, "send")
os.mkdir(sendDir, 0755)
exampleFile1 = os.path.join(sendDir, "example1.txt")
with open(exampleFile1, "w") as outputFile:
outputFile.write("this is an example\n" * 1000000)
scpExample1 = exampleVm.scpPutCommand(fromHostPath=exampleFile1, toGuestPath="~/example1.txt")
print "returncode=" + str(scpExample1.returncode)
print "output=" + scpExample1.output
scpExample2 = exampleVm.scpGetCommand(fromGuestPath="/etc/hosts", toHostPath=exampleFile1)
print "returncode=" + str(scpExample2.returncode)
print "output=" + scpExample2.output
with open(exampleFile1, "r") as inputFile:
exampleFile1Content = inputFile.read()
print "content=\n" + exampleFile1Content
finally:
shutil.rmtree(exampleDir)
# a good way to shut down the virtual machine
exampleVm.shutdownCommand()
VMwareHypervisor.local.sleepUntilNotRunning(exampleVm.vmxFilePath, ticker=True)
# a possible modification pointed out
# start up again so it is running for use
#VMwareHypervisor.local.start(exampleVm.vmxFilePath, gui=True, extraSleepSeconds=0)
#exampleVm.sleepUntilSshIsAvailable(ticker=True)
# a possible modification pointed out
# start up for showing successful login into GUI
#VMwareHypervisor.local.start(exampleVm.vmxFilePath, gui=True, extraSleepSeconds=0)
#exampleSshParameters = exampleVm.sshParameters(user=exampleVm.regularUser)
#LinuxSshCommand.sleepUntilIsGuiAvailable(exampleSshParameters, ticker=True)
# a possible modification pointed out
# just a demo
#exampleSshParameters = exampleVm.sshParameters(user=exampleVm.regularUser)
#SshCommand(exampleSshParameters, [ElGnome.commandToStartApplicationInGui("firefox about:blank")])
#
print "%s is done with %s, it is ready for you to use at %s" % \
(__file__, exampleVm.basenameStem, exampleVm.portsFile.getPorts(protocol="ssh", user="root")[0]["ipaddress"])
# END essential example code
| {
"content_hash": "1cd030a49576124ab93dee93ae611f63",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 143,
"avg_line_length": 49.38028169014085,
"alnum_prop": 0.756322494770869,
"repo_name": "srguiwiz/nrvr-commander",
"id": "6a15515ccd30a4409d9f4d4846daa536dc030f6c",
"size": "10537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/examples/make-an-el-vm-001.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "532906"
}
],
"symlink_target": ""
} |
"""
WSGI config for stereo8 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "stereo8.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "fcaadf4c2ae552bb8e21eeb5b5582735",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.785714285714285,
"alnum_prop": 0.7737789203084833,
"repo_name": "gjeck/stereo8",
"id": "84e02a60650d7872acd5cd458bca449e84dc956e",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ingestion/stereo8/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "591"
},
{
"name": "HTML",
"bytes": "623"
},
{
"name": "Python",
"bytes": "56938"
}
],
"symlink_target": ""
} |
import logging
from ppci.api import cc

# Verbose logging makes the compiler's front-end stages visible.
logging.basicConfig(level=logging.DEBUG)

# Compile main.c for x86_64 with debug info, then persist the object file.
with open('main.c', 'r') as source:
    obj = cc(source, 'x86_64', debug=True)
print('Object file created:', obj)

with open('hello.oj', 'w') as destination:
    obj.save(destination)
| {
"content_hash": "95ee5ce3abecde93f7336f1d6199112e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 40,
"avg_line_length": 21.454545454545453,
"alnum_prop": 0.6610169491525424,
"repo_name": "windelbouwman/ppci-mirror",
"id": "7828bf76360066b42e37032284d1eef08d6eefb9",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/c/hello/make.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "94"
},
{
"name": "Brainfuck",
"bytes": "5867"
},
{
"name": "C",
"bytes": "229265"
},
{
"name": "C++",
"bytes": "1257"
},
{
"name": "Coq",
"bytes": "98028"
},
{
"name": "HTML",
"bytes": "363"
},
{
"name": "JavaScript",
"bytes": "2165"
},
{
"name": "LLVM",
"bytes": "11206"
},
{
"name": "Python",
"bytes": "2991165"
},
{
"name": "Shell",
"bytes": "960"
},
{
"name": "Verilog",
"bytes": "9363"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from onthefly.monkey_patch import patch
class OntheflyConfig(AppConfig):
    """App config that installs the on-the-fly monkey patch exactly once."""

    name = 'onthefly'
    # Class-level flag: Django may call ready() more than once, but patch()
    # must only ever run a single time.
    patched = False

    def ready(self):
        if self.patched:
            return
        patch()
        self.patched = True
| {
"content_hash": "393540e23cf4469cc22919d036567e78",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 39,
"avg_line_length": 19.615384615384617,
"alnum_prop": 0.6392156862745098,
"repo_name": "baranbartu/onthefly",
"id": "2b057bfece101f5be7263cbcaa11e12d77d7ef9d",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onthefly/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4711"
},
{
"name": "Python",
"bytes": "10022"
}
],
"symlink_target": ""
} |
import unittest
from katas.kyu_6.exercise_in_summing import maximum_sum, minimum_sum
class ExerciseInSummingTestCase(unittest.TestCase):
    """Tests for the `minimum_sum` / `maximum_sum` kata solutions."""
    def test_equals(self):
        # Two smallest of [1..5] -> 1 + 2 == 3.
        self.assertEqual(minimum_sum([5, 4, 3, 2, 1], 2), 3)
    def test_equals_2(self):
        # Three largest of [1..5] -> 5 + 4 + 3 == 12.
        self.assertEqual(maximum_sum([5, 4, 3, 2, 1], 3), 12)
    def test_equals_3(self):
        # Asking for more elements than exist sums the whole list.
        self.assertEqual(minimum_sum([5, 4, 3, 2, 1], 7), 15)
    def test_equals_4(self):
        # Empty input yields 0.
        self.assertEqual(minimum_sum([], 3), 0)
| {
"content_hash": "e1f7371a786b23361c8b48062039e00e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 28.823529411764707,
"alnum_prop": 0.636734693877551,
"repo_name": "the-zebulan/CodeWars",
"id": "f2f7298865f53f1486356d05b8ee27593dc2bb17",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/kyu_6_tests/test_exercise_in_summing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from collections import defaultdict
import json
import socket
import sys
import geoip2.database
# domain -> occurrence count tallies; email_domains is merged into
# eppn_domains after the input file has been processed (see the loop below).
eppn_domains = defaultdict(int)
email_domains = defaultdict(int)
def process_email(x):
    """Tally the normalized domain of one e-mail address into email_domains."""
    pieces = x.split('@')
    if len(pieces) != 2:
        print('error: %r' % (x), file=sys.stderr)
        return
    labels = pieces[1].split('.')
    labels.reverse()
    # For these TLDs keep only the registrable part (e.g. "mit.edu").
    if labels[0] in ('edu', 'gr', 'br', 'de', 'it', 'kr'):
        labels = labels[:2]
    # Drop a leading "student(s)." subdomain (leftmost label sits last while
    # the list is reversed).
    if labels[-1] in ('student', 'students'):
        labels = labels[:-1]
    labels.reverse()
    normalized = '.'.join(labels)
    email_domains[normalized] += 1
def process_eppn(x):
    """Tally the domain portion of an eduPersonPrincipalName into eppn_domains.

    Malformed values (not exactly one '@') are silently ignored, matching the
    best-effort behavior of the rest of the script.
    """
    pieces = x.split('@')
    if len(pieces) == 2:
        eppn_domains[pieces[1]] += 1
def process_line(x):
    """Route one "eppn,email" CSV line to the appropriate tally function."""
    raw_eppn, raw_email = x.split(',')
    eppn = raw_eppn.strip().lower()
    email = raw_email.strip().lower()
    if 'gpolab.bbn.com' in eppn:
        # GPO-lab-issued eppns carry no informative domain; use the e-mail.
        process_email(email)
    else:
        process_eppn(eppn)
# Tally every line of the input CSV (one "eppn,email" record per line).
with open('eppn-email.csv') as f:
    for line in f:
        process_line(line)
# combine email and eppn domains into a single tally
for d in sorted(email_domains):
    eppn_domains[d] = eppn_domains[d] + email_domains[d]
def make_site(domain, geodata, count):
    """Build one JSON-serializable site record from a GeoIP city lookup."""
    location = geodata.location
    return {
        'name': domain,
        'count': count,
        'latitude': location.latitude,
        'longitude': location.longitude,
        'country': geodata.country.iso_code,
    }
# Geolocate every tallied domain via DNS plus the local GeoLite2 database and
# emit the resulting site list as JSON on stdout (diagnostics go to stderr).
reader = geoip2.database.Reader('GeoLite2-City.mmdb')
domains = []
for d in eppn_domains:
    # FIX: the original used bare `except:` clauses, which also swallow
    # KeyboardInterrupt/SystemExit during this slow DNS loop. Catching
    # Exception keeps the intended best-effort behavior (DNS failures,
    # GeoIP misses) while leaving Ctrl-C usable.
    try:
        ipaddr = socket.gethostbyname(d)
        x = reader.city(ipaddr)
        domains.append(make_site(d, x, eppn_domains[d]))
    except Exception:
        # Retry once with a "www." prefix before giving up on this domain.
        d2 = 'www.' + d
        try:
            ipaddr = socket.gethostbyname(d2)
            x = reader.city(ipaddr)
            domains.append(make_site(d, x, eppn_domains[d]))
        except Exception:
            msg = "DNS FAILURE: %s, %s (count=%d)" % (d, d2, eppn_domains[d])
            print(msg, file=sys.stderr)
msg = "Found locations for %d domains" % (len(domains))
print(msg, file=sys.stderr)
# stdout carries only the JSON payload so it can be redirected to a file.
print(json.dumps(domains))
| {
"content_hash": "4e6f9d12dae78b8a892d963b9a54509f",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 77,
"avg_line_length": 28.647058823529413,
"alnum_prop": 0.5835728952772073,
"repo_name": "GENI-NSF/geni-demoviz",
"id": "df398332e40ec33b5b0111f1569dfd59c32dd50d",
"size": "5110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genisites.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "318"
},
{
"name": "HTML",
"bytes": "10257"
},
{
"name": "JavaScript",
"bytes": "78902"
},
{
"name": "PHP",
"bytes": "55271"
},
{
"name": "PLpgSQL",
"bytes": "190"
},
{
"name": "Python",
"bytes": "73205"
}
],
"symlink_target": ""
} |
import re
import sys
import pysam
from subprocess import Popen, PIPE
from collections import defaultdict
import pickle
pi_IN="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/WB_piRNA_positions.gff"
reference="/lscr2/andersenlab/kml436/sv_sim2/c_elegans.PRJNA13758.WS245.genomic.fa"
pi_fasta="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/piRNAs.fasta"
TE_consensus="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/SET2/round2_consensus_set2.fasta"
family_renames="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/round2_WB_familes_set2.txt"
# put shortened WB family names into a dictionary
renames={}
with open(family_renames, 'r') as IN:
for line in IN:
line=line.rstrip('\n')
items=re.split('\t',line)
element,family=items[0:2]
renames[element]=family
OUT=open("/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/piRNA_regions.bed", 'w')
with open(pi_IN, 'r') as IN:
for line in IN:
line=line.rstrip('\n')
items=re.split('\t',line)
chromsome,WB,pi,start,end=items[0:5]
orient=items[6]
piRNA=items[8]
start=int(start)-1
OUT.write("{chromsome}\t{start}\t{end}\t{piRNA}\t.\t{orient}\n".format(**locals()))
OUT.close()
# get fasta seqs of piRNAs (-s honors strand, -name labels each sequence
# with the BED name column written above)
cmd="bedtools getfasta -s -name -fi {reference} -bed /lscr2/andersenlab/kml436/git_repos2/Transposons2/files/piRNA_regions.bed -fo {pi_fasta}".format(**locals())
result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
# create bwa index for TE seqs of that family
cmd="bwa index {TE_consensus}".format(**locals())
result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
# Per-mismatch summary tables: all alignments vs. the "strict"-filtered set.
OUT_SUMMARY=open("summary_mismatches_BWA.txt", 'w')
OUT_SUMMARY.write("Number of Mismatches\tNumber Unique piRNAs Aligned\tNumber Unique Transposons\n")
OUT_SUMMARY_STRICT=open("summary_mismatches_BWA_strict.txt", 'w')
OUT_SUMMARY_STRICT.write("Number of Mismatches\tNumber Unique piRNAs Aligned\tNumber Unique Transposons\n")
# One row per unique (piRNA, TE family) pair, accumulated across align() runs.
BWA_PAIRS=open("BWA_pairs.txt", 'w')
# Tracks "family_transcript" pairs already reported; shared by all align() calls.
seen={}
def align(mismatches,strict=False):
	"""Align the piRNA FASTA against the TE consensus allowing `mismatches`.

	Shell pipeline (all outputs land in the cwd, prefixed with the mismatch
	count): bwa aln/samse -> xa2multi.pl (expand XA multi-hit records) ->
	samtools calmd (add MD tags) -> rename TE families via the global
	`renames` dict -> drop unmapped reads, sort, index -> count unique
	piRNAs/TEs and append a row to the global OUT_SUMMARY file.

	When `strict` is true, each (piRNA transcript, TE family) pair is also
	recorded once across *all* calls (global `seen` dict), written to the
	global BWA_PAIRS/OUT_ALL_STRICT files, the per-family transcript lists
	are pickled, and a row is appended to OUT_SUMMARY_STRICT.
	"""
	TE_consensus="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/SET2/round2_consensus_set2.fasta"
	pi_fasta="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/piRNAs.fasta"
	family_renames="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/round2_WB_familes_set2.txt"
	# run bwa aln (-n = max mismatches, -o 0 = no gaps)
	cmd = "bwa aln -o 0 -n {mismatches} -t 2 {TE_consensus} {pi_fasta} > {mismatches}_pi_v_TE.sai".format(**locals())
	result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	cmd = "bwa samse {TE_consensus} {mismatches}_pi_v_TE.sai {pi_fasta} > {mismatches}_pi_v_TE.sam".format(**locals())
	result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	# expand the XA tag so each alternative hit becomes its own SAM record
	cmd = "perl /lscr2/andersenlab/kml436/git_repos2/Transposons2/scripts/xa2multi.pl {mismatches}_pi_v_TE.sam > {mismatches}_MA.sam".format(**locals())
	result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	#add MD field for the multi-mapped reads
	cmd = "samtools calmd -S {mismatches}_MA.sam /lscr2/andersenlab/kml436/git_repos2/Transposons2/files/SET2/round2_consensus_set2.fasta > {mismatches}_MAMD.sam".format(**locals())
	result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	# rename headers and read info: swap full WormBase element names for the
	# short family names in both alignment records (col 3) and @SQ headers
	OUT=open("{mismatches}_renamed.sam".format(**locals()), 'w')
	with open("{mismatches}_MAMD.sam".format(**locals()) ,'r') as IN:
		for line in IN:
			line=line.rstrip()
			items=re.split('\t',line)
			TE=items[2]
			if TE in renames.keys():
				TE=renames[TE]
				items[2]=TE
			if re.search('^@SQ',line):
				sn=items[1]
				match=re.search("SN:(.*)",sn)
				element=match.group(1)
				if element in renames.keys():
					trans=renames[element]
					items[1]="SN:"+ trans
			new_line='\t'.join(items[0:])
			OUT.write(new_line + '\n')
	OUT.close()
	cmd = "samtools view -bS -F4 {mismatches}_renamed.sam > {mismatches}_renamed.bam".format(**locals()) # filter out unmapped reads
	result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	cmd = "samtools sort -o -@ 8 {mismatches}_renamed.bam out > {mismatches}_renamed.sorted.bam".format(**locals())
	result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	cmd = "samtools index {mismatches}_renamed.sorted.bam".format(**locals())
	result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	cmd = "samtools flagstat {mismatches}_renamed.sorted.bam > {mismatches}_stats.txt".format(**locals())
	result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	# count unique aligned piRNAs (col 1) and unique TEs hit (col 3) via shell
	cmd = "samtools view {mismatches}_renamed.sorted.bam |cut -f1|sort|uniq |wc -l".format(**locals())
	unique_pis, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	cmd = "samtools view {mismatches}_renamed.sorted.bam |cut -f3|sort|uniq |wc -l".format(**locals())
	unique_TEs, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
	unique_pis=re.sub('\n','',unique_pis)
	unique_TEs=re.sub('\n','',unique_TEs)
	OUT_SUMMARY.write("{mismatches}\t{unique_pis}\t{unique_TEs}\n".format(**locals()))
	OUT_ALL_STRICT=open("{mismatches}_strict.txt".format(**locals()), 'w')
	alignments=defaultdict(list)
	Bfile = pysam.AlignmentFile("{mismatches}_renamed.sorted.bam".format(**locals()), "rb")
	seen_pis={}
	seen_TEs={}
	seen_pis_strict={}
	seen_TEs_strict={}
	Binfo = Bfile.fetch()
	for x in Binfo:
		query = x.query_name
		TE = Bfile.getrname(x.reference_id)
		flag = x.flag
		MD = x.get_tag('MD')
		match = re.search("(?:Pseudogene|Transcript|sequence_name|^Name)(?:=|:)([\w|\d]+.\d+)", query) # just pull the gene name, remove splice info
		pi_transcript =match.group(1)
		if query=="*":
			print "TTTTT"
		# MD tag: the leading/trailing numbers are the match-run lengths at
		# each end of the read (used by the currently-disabled seed checks)
		MD_nums=re.findall('\d+', MD)
		first_digit=int(MD_nums[0])
		last_digit=int(MD_nums[-1])
		seen_pis[query]=0
		seen_TEs[TE]=0
		family_short=re.sub("_CE$","",TE)
		family_short=re.sub("WBTransposon","WBT",family_short)
		pair=family_short + "_" + pi_transcript
		# enforce no mismatches in first 8 bps of the piRNA
		# NOTE: the first_digit/last_digit >= 8 conditions are commented out
		# below, so "strict" currently only deduplicates pairs by strand.
		if strict:
			if (flag==0 or flag==256): # and first_digit>=8
				if pair not in seen.keys():
					seen_pis_strict[query]=0
					seen_TEs_strict[TE]=0
					OUT_ALL_STRICT.write(str(x)+'\n')
					alignments[family_short].append(pi_transcript)
					BWA_PAIRS.write("{pi_transcript}\t{family_short}\t{mismatches}\n".format(**locals()))
					seen[pair]=0
			elif (flag==16 or flag==272): # and last_digit>=8
				if pair not in seen.keys():
					seen_pis_strict[query]=0
					seen_TEs_strict[TE]=0
					OUT_ALL_STRICT.write(str(x)+'\n')
					alignments[family_short].append(pi_transcript)
					BWA_PAIRS.write("{pi_transcript}\t{family_short}\t{mismatches}\n".format(**locals()))
					seen[pair]=0
			elif flag !=0 and flag !=16 and flag!=256 and flag!=272:
				# only plain forward/reverse primary and secondary alignments
				# are expected here; anything else aborts the run
				sys.exit("ERROR: Flag %s not accounted for, exiting..." %flag)
	if strict:
		no_pis_strict = len(seen_pis_strict.keys())
		no_TEs_strict = len(seen_TEs_strict.keys())
		OUT_SUMMARY_STRICT.write("{mismatches}\t{no_pis_strict}\t{no_TEs_strict}\n".format(**locals()))
		with open("strict_alignments_{mismatches}.txt".format(**locals()), "wb") as fp: # Pickle
			pickle.dump(alignments, fp)
	no_pis = len(seen_pis.keys())
	no_TEs = len(seen_TEs.keys())
	# make sure methods of counting uniqueness are the same
	if int(no_pis) != int(unique_pis):
		sys.exit("ERROR: Inconsistency in unique counts,exiting...")
	if int(no_TEs) != int(unique_TEs):
		sys.exit("ERROR: Inconsistency in unique counts,exiting...")
	OUT_ALL_STRICT.close()
# Run the full alignment pipeline at 0-5 allowed mismatches; strict=True also
# records deduplicated piRNA/TE-family pairs (and pickles them per level).
align(0,strict=True)
align(1,strict=True)
align(2,strict=True)
align(3,strict=True)
align(4,strict=True)
align(5,strict=True)
OUT_SUMMARY.close()
OUT_SUMMARY_STRICT.close()
BWA_PAIRS.close()
| {
"content_hash": "1db5371cc444d2567fa1cbe440def717",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 178,
"avg_line_length": 36.54716981132076,
"alnum_prop": 0.697212183789365,
"repo_name": "klaricch/Transposons2",
"id": "a485a01d3ea8b71e2f90059e3db8d33361f3886f",
"size": "7935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/piBWA.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "731"
},
{
"name": "Python",
"bytes": "337144"
},
{
"name": "R",
"bytes": "68986"
},
{
"name": "Shell",
"bytes": "732668"
}
],
"symlink_target": ""
} |
"""
sentry_kafka.models
~~~~~~~~~~~~~~~~~~~~~
"""
from django import forms
from django.conf import settings
from django.core import (validators, exceptions)
from kafka import KafkaClient, SimpleProducer
from sentry.plugins.bases.notify import NotifyPlugin
import sentry_kafka
import json
import re
import types
def KafkaOptionsFormValidateDots(value):
    """Form validator rejecting the reserved Kafka topic names "." and "."."""
    if value in ('.', '..'):
        raise exceptions.ValidationError('Topic cannot be "." or ".."')
class KafkaOptionsForm(forms.Form):
    """Per-project plugin settings: broker string, topic name, topic creation."""
    # Kafka topic names may contain only alphanumerics, '.', '-' and '_'.
    valid_topic_expr = re.compile('^[-_.a-z0-9]+$', re.IGNORECASE)
    kafka_instance = forms.CharField(
        help_text="Your Kafka broker connection string (may be a comma separated list of brokers)",
        required=True)
    topic = forms.CharField(
        help_text="Kafka topic - will use \"Organization.Team.Project\" by default",
        required=False, max_length=255,
        validators=[validators.RegexValidator(
            regex=valid_topic_expr,
            message='Topics may only include alphanumeric characters, numbers, periods, dashes and underscores'),
            validators.MaxLengthValidator(255),
            KafkaOptionsFormValidateDots])
    assume_topic_exists = forms.BooleanField(
        help_text="Do not check for existence or manually create the topic before sending the message.",
        initial=False, required=False)
    def __init__(self, *args, **kwargs):
        super(KafkaOptionsForm, self).__init__(*args, **kwargs)
        # If the broker is set in the settings configuration, disable the field
        # so users cannot override the site-wide value per project.
        if settings.KAFKA_BROKERS:
            self.fields['kafka_instance'].widget.attrs['disabled'] = True
            self.fields['kafka_instance'].required = False
            self.fields['kafka_instance'].initial = settings.KAFKA_BROKERS
    def clean(self):
        # A globally configured broker always wins over the (disabled) field.
        super(KafkaOptionsForm, self).clean()
        if settings.KAFKA_BROKERS:
            self.cleaned_data['kafka_instance'] = settings.KAFKA_BROKERS
        return self.cleaned_data
class KafkaMessage(NotifyPlugin):
author = 'Chad Killingsworth, Jack Henry and Associates'
author_url = 'https://github.com/banno/getsentry-kafka'
version = sentry_kafka.VERSION
description = "Forward events to Kafka for logging."
resource_links = [
('Bug Tracker', 'https://github.com/banno/getsentry-kafka/issues'),
('Source', 'https://github.com/banno/getsentry-kafka'),
]
slug = 'kafka'
title = 'Kafka Logging'
conf_title = title
conf_key = 'kafka'
project_conf_form = KafkaOptionsForm
timeout = getattr(settings, 'SENTRY_KAFKA_TIMEOUT', 3)
invalid_topic_chars_expr = re.compile(r'[^-a-z0-9]+', re.IGNORECASE)
def is_configured(self, project):
return all((self.get_option(k, project) for k in ('kafka_instance')))
def notify(self, notification):
project = notification.event.project
team_name = notification.event.project.team.name,
organization_name = notification.event.project.organization.name,
project_name = notification.event.project.name,
topic = self.get_option('topic',
project) or KafkaMessage.get_default_topic(
organization_name, team_name, project_name)
endpoint = (settings.KAFKA_BROKERS or
self.get_option('kafka_instance', project))
assume_topic_exists = self.get_option('assume_topic_exists',
project) or False
topic = topic[0:255] # Kafka topics must be at most 255 characters
if endpoint:
self.send_payload(
endpoint=endpoint,
topic=topic,
message='{"type":"ALERT","org":"%(organization_name)s","team":"%(team_name)s",' +
'"project":"%(project_name)s","platform":"%(platform)s","message":"%(message)s"' +
'"data":%(data)s}' % {
'organization_name': organization_name,
'team_name': team_name,
'project_name': project_name,
'message': notification.event.error(),
'data': json.dumps(notification.event.as_dict(),
default=KafkaMessage.date_serializer)
},
ensure_topic_exists=not assume_topic_exists
)
def send_payload(self, endpoint, topic, message, ensure_topic_exists=True):
kafka = KafkaClient(endpoint)
if ensure_topic_exists:
kafka.ensure_topic_exists(topic)
producer = SimpleProducer(kafka, async=True)
producer.send_messages(topic, message)
@staticmethod
def get_default_topic(organization, team, project):
return ('%s.%s.%s' % (
KafkaMessage.invalid_topic_chars_expr.sub('_',
KafkaMessage.list_to_string(organization)),
KafkaMessage.invalid_topic_chars_expr.sub('_',
KafkaMessage.list_to_string(team)),
KafkaMessage.invalid_topic_chars_expr.sub('_',
KafkaMessage.list_to_string(project))
))
@staticmethod
def list_to_string(obj):
return str(obj) if isinstance(obj, types.StringTypes) else str(obj[0])
@staticmethod
def date_serializer(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
| {
"content_hash": "a9fe683ae852ac5c3bfed4d10ae7a510",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 113,
"avg_line_length": 39.5,
"alnum_prop": 0.6191362620997766,
"repo_name": "Banno/getsentry-kafka",
"id": "4e5218f58c6b2b47d8710a5072e112a6a0e3f463",
"size": "5372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentry_kafka/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6828"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as et
class Include:
    """Meta plugin that pulls another config file into the current one.

    Declared as <obj type="Include" config="other"/>; no name attribute is
    needed.
    """

    def __init__(self, xml):
        # Stem of the config file to include, resolved under the config dir.
        self.config = xml.get('config')

    def postInit(self):
        # NOTE(review): `manager` appears to be injected into this module's
        # globals by the plugin framework -- confirm before refactoring.
        tree = et.parse(manager.getConfigDir() + self.config + '.xml')
        yield None
        # Hand each <obj> element to the manager, yielding between steps so
        # that loading remains cooperative.
        for entry in tree.findall('obj'):
            for _ in manager.addObj(entry):
                yield None

    def reload(self, xml):
        pass

    def start(self):
        pass

    def stop(self):
        pass

    def destroy(self):
        pass
| {
"content_hash": "da976c2497431986e46c0242617ab521",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 147,
"avg_line_length": 27.233333333333334,
"alnum_prop": 0.591187270501836,
"repo_name": "gamingrobot/second-sunrise",
"id": "09a52d13ae0a1fb6dfe62df71c2fccefac638d40",
"size": "1389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/plugins/core/include/include.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "26252"
},
{
"name": "Python",
"bytes": "142803"
},
{
"name": "Shell",
"bytes": "360"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.